summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--camera/Android.mk8
-rw-r--r--camera/CameraParameters.cpp342
-rw-r--r--include/camera/Camera.h3
-rw-r--r--include/camera/CameraParameters.h263
-rw-r--r--include/media/AudioParameter.h8
-rw-r--r--include/media/AudioSystem.h8
-rw-r--r--include/media/AudioTrack.h25
-rwxr-xr-x[-rw-r--r--]include/media/IAudioFlinger.h26
-rw-r--r--include/media/IDirectTrack.h93
-rw-r--r--include/media/IDirectTrackClient.h51
-rw-r--r--include/media/MediaPlayerInterface.h4
-rw-r--r--include/media/MediaProfiles.h14
-rw-r--r--include/media/mediarecorder.h15
-rw-r--r--include/media/stagefright/ACodec.h3
-rw-r--r--include/media/stagefright/AudioPlayer.h21
-rw-r--r--include/media/stagefright/AudioSource.h1
-rw-r--r--include/media/stagefright/ExtendedWriter.h143
-rw-r--r--include/media/stagefright/FMRadioSource.h64
-rwxr-xr-xinclude/media/stagefright/LPAPlayer.h291
-rw-r--r--include/media/stagefright/MediaDebug.h40
-rw-r--r--include/media/stagefright/MediaDefs.h1
-rw-r--r--include/media/stagefright/MediaExtractor.h2
-rw-r--r--include/media/stagefright/OMXCodec.h35
-rw-r--r--include/media/stagefright/QCOMXCodec.h101
-rw-r--r--include/media/stagefright/TunnelPlayer.h250
-rw-r--r--include/media/stagefright/WAVEWriter.h108
-rwxr-xr-xlibvideoeditor/lvpp/Android.mk7
-rw-r--r--media/libmedia/Android.mk30
-rw-r--r--media/libmedia/AudioParameter.cpp8
-rw-r--r--media/libmedia/AudioRecord.cpp12
-rw-r--r--media/libmedia/AudioSystem.cpp23
-rw-r--r--media/libmedia/AudioTrack.cpp286
-rw-r--r--media/libmedia/IAudioFlinger.cpp69
-rw-r--r--media/libmedia/IAudioFlingerClient.cpp10
-rw-r--r--media/libmedia/IDirectTrack.cpp178
-rw-r--r--media/libmedia/IDirectTrackClient.cpp69
-rw-r--r--media/libmedia/MediaProfiles.cpp30
-rw-r--r--media/libmedia/ToneGenerator.cpp6
-rw-r--r--media/libmediaplayerservice/Android.mk7
-rw-r--r--media/libmediaplayerservice/MediaPlayerService.cpp152
-rw-r--r--media/libmediaplayerservice/MediaPlayerService.h11
-rw-r--r--media/libmediaplayerservice/StagefrightRecorder.cpp162
-rw-r--r--media/libmediaplayerservice/StagefrightRecorder.h9
-rw-r--r--media/libstagefright/ACodec.cpp41
-rwxr-xr-x[-rw-r--r--]media/libstagefright/Android.mk60
-rw-r--r--media/libstagefright/AudioPlayer.cpp7
-rw-r--r--media/libstagefright/AudioSource.cpp20
-rw-r--r--media/libstagefright/AwesomePlayer.cpp431
-rwxr-xr-xmedia/libstagefright/CameraSource.cpp13
-rw-r--r--media/libstagefright/DataSource.cpp6
-rw-r--r--media/libstagefright/ExtendedExtractor.cpp110
-rw-r--r--media/libstagefright/ExtendedWriter.cpp390
-rw-r--r--media/libstagefright/FMRadioSource.cpp201
-rwxr-xr-xmedia/libstagefright/LPAPlayer.cpp833
-rw-r--r--media/libstagefright/LPAPlayerALSA.cpp793
-rw-r--r--media/libstagefright/MPEG4Extractor.cpp85
-rwxr-xr-xmedia/libstagefright/MPEG4Writer.cpp79
-rw-r--r--media/libstagefright/MediaExtractor.cpp57
-rw-r--r--[-rwxr-xr-x]media/libstagefright/OMXCodec.cpp942
-rw-r--r--media/libstagefright/PCMExtractor.cpp302
-rw-r--r--media/libstagefright/QCMediaDefs.cpp55
-rw-r--r--media/libstagefright/QCOMXCodec.cpp592
-rw-r--r--media/libstagefright/StagefrightMediaScanner.cpp8
-rw-r--r--media/libstagefright/TunnelPlayer.cpp904
-rw-r--r--media/libstagefright/WAVEWriter.cpp323
-rw-r--r--media/libstagefright/WVMExtractor.cpp13
-rw-r--r--media/libstagefright/codecs/mp3dec/Android.mk61
-rw-r--r--media/libstagefright/codecs/mp3dec/MP3Decoder.cpp586
-rw-r--r--media/libstagefright/colorconversion/SoftwareRenderer.cpp23
-rw-r--r--media/libstagefright/id3/Android.mk4
-rw-r--r--media/libstagefright/include/AwesomePlayer.h32
-rw-r--r--media/libstagefright/include/ExtendedExtractor.h58
-rw-r--r--media/libstagefright/include/MP3Decoder.h70
-rw-r--r--media/libstagefright/include/PCMExtractor.h61
-rw-r--r--media/libstagefright/omx/Android.mk4
-rw-r--r--media/libstagefright/omx/OMXMaster.cpp3
-rw-r--r--media/libstagefright/timedtext/Android.mk4
-rw-r--r--media/libstagefright/wifi-display/Android.mk4
-rw-r--r--media/libstagefright/wifi-display/source/WifiDisplaySource.cpp5
-rw-r--r--media/mediaserver/Android.mk5
-rw-r--r--media/mediaserver/main_mediaserver.cpp9
-rw-r--r--services/audioflinger/Android.mk4
-rw-r--r--services/audioflinger/AudioFlinger.cpp935
-rw-r--r--services/audioflinger/AudioFlinger.h215
-rw-r--r--services/audioflinger/AudioMixer.cpp4
-rw-r--r--services/audioflinger/AudioPolicyService.cpp11
-rw-r--r--services/camera/libcameraservice/Camera2Client.cpp2
-rw-r--r--services/camera/libcameraservice/CameraClient.cpp75
-rw-r--r--services/camera/libcameraservice/CameraClient.h3
-rw-r--r--services/camera/libcameraservice/CameraHardwareInterface.h16
-rw-r--r--services/camera/libcameraservice/CameraService.cpp15
-rw-r--r--services/camera/libcameraservice/CameraService.h4
92 files changed, 11222 insertions, 245 deletions
diff --git a/camera/Android.mk b/camera/Android.mk
index 7286f92..9f60da2 100644
--- a/camera/Android.mk
+++ b/camera/Android.mk
@@ -18,6 +18,14 @@ LOCAL_SHARED_LIBRARIES := \
libui \
libgui
+ifeq ($(BOARD_CAMERA_HAVE_ISO),true)
+ LOCAL_CFLAGS += -DHAVE_ISO
+endif
+
+ifeq ($(BOARD_USES_QCOM_HARDWARE),true)
+ LOCAL_CFLAGS += -DQCOM_HARDWARE
+endif
+
LOCAL_MODULE:= libcamera_client
include $(BUILD_SHARED_LIBRARY)
diff --git a/camera/CameraParameters.cpp b/camera/CameraParameters.cpp
index fd91bf2..f41be22 100644
--- a/camera/CameraParameters.cpp
+++ b/camera/CameraParameters.cpp
@@ -26,12 +26,21 @@ namespace android {
// Parameter keys to communicate between camera application and driver.
const char CameraParameters::KEY_PREVIEW_SIZE[] = "preview-size";
const char CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES[] = "preview-size-values";
+#ifdef QCOM_HARDWARE
+const char CameraParameters::KEY_SUPPORTED_HFR_SIZES[] = "hfr-size-values";
+#endif
const char CameraParameters::KEY_PREVIEW_FORMAT[] = "preview-format";
const char CameraParameters::KEY_SUPPORTED_PREVIEW_FORMATS[] = "preview-format-values";
const char CameraParameters::KEY_PREVIEW_FRAME_RATE[] = "preview-frame-rate";
const char CameraParameters::KEY_SUPPORTED_PREVIEW_FRAME_RATES[] = "preview-frame-rate-values";
const char CameraParameters::KEY_PREVIEW_FPS_RANGE[] = "preview-fps-range";
const char CameraParameters::KEY_SUPPORTED_PREVIEW_FPS_RANGE[] = "preview-fps-range-values";
+#ifdef QCOM_HARDWARE
+const char CameraParameters::KEY_PREVIEW_FRAME_RATE_MODE[] = "preview-frame-rate-mode";
+const char CameraParameters::KEY_SUPPORTED_PREVIEW_FRAME_RATE_MODES[] = "preview-frame-rate-modes";
+const char CameraParameters::KEY_PREVIEW_FRAME_RATE_AUTO_MODE[] = "frame-rate-auto";
+const char CameraParameters::KEY_PREVIEW_FRAME_RATE_FIXED_MODE[] = "frame-rate-fixed";
+#endif
const char CameraParameters::KEY_PICTURE_SIZE[] = "picture-size";
const char CameraParameters::KEY_SUPPORTED_PICTURE_SIZES[] = "picture-size-values";
const char CameraParameters::KEY_PICTURE_FORMAT[] = "picture-format";
@@ -51,10 +60,20 @@ const char CameraParameters::KEY_WHITE_BALANCE[] = "whitebalance";
const char CameraParameters::KEY_SUPPORTED_WHITE_BALANCE[] = "whitebalance-values";
const char CameraParameters::KEY_EFFECT[] = "effect";
const char CameraParameters::KEY_SUPPORTED_EFFECTS[] = "effect-values";
+#ifdef QCOM_HARDWARE
+const char CameraParameters::KEY_TOUCH_AF_AEC[] = "touch-af-aec";
+const char CameraParameters::KEY_SUPPORTED_TOUCH_AF_AEC[] = "touch-af-aec-values";
+const char CameraParameters::KEY_TOUCH_INDEX_AEC[] = "touch-index-aec";
+const char CameraParameters::KEY_TOUCH_INDEX_AF[] = "touch-index-af";
+#endif
const char CameraParameters::KEY_ANTIBANDING[] = "antibanding";
const char CameraParameters::KEY_SUPPORTED_ANTIBANDING[] = "antibanding-values";
const char CameraParameters::KEY_SCENE_MODE[] = "scene-mode";
const char CameraParameters::KEY_SUPPORTED_SCENE_MODES[] = "scene-mode-values";
+#ifdef QCOM_HARDWARE
+const char CameraParameters::KEY_SCENE_DETECT[] = "scene-detect";
+const char CameraParameters::KEY_SUPPORTED_SCENE_DETECT[] = "scene-detect-values";
+#endif // QCOM_HARDWARE
const char CameraParameters::KEY_FLASH_MODE[] = "flash-mode";
const char CameraParameters::KEY_SUPPORTED_FLASH_MODES[] = "flash-mode-values";
const char CameraParameters::KEY_FOCUS_MODE[] = "focus-mode";
@@ -81,6 +100,28 @@ const char CameraParameters::KEY_ZOOM_SUPPORTED[] = "zoom-supported";
const char CameraParameters::KEY_SMOOTH_ZOOM_SUPPORTED[] = "smooth-zoom-supported";
const char CameraParameters::KEY_FOCUS_DISTANCES[] = "focus-distances";
const char CameraParameters::KEY_VIDEO_FRAME_FORMAT[] = "video-frame-format";
+#ifdef QCOM_HARDWARE
+const char CameraParameters::KEY_ISO_MODE[] = "iso";
+const char CameraParameters::KEY_SUPPORTED_ISO_MODES[] = "iso-values";
+const char CameraParameters::KEY_LENSSHADE[] = "lensshade";
+const char CameraParameters::KEY_SUPPORTED_LENSSHADE_MODES[] = "lensshade-values";
+const char CameraParameters::KEY_AUTO_EXPOSURE[] = "auto-exposure";
+const char CameraParameters::KEY_SUPPORTED_AUTO_EXPOSURE[] = "auto-exposure-values";
+const char CameraParameters::KEY_DENOISE[] = "denoise";
+const char CameraParameters::KEY_SUPPORTED_DENOISE[] = "denoise-values";
+const char CameraParameters::KEY_SELECTABLE_ZONE_AF[] = "selectable-zone-af";
+const char CameraParameters::KEY_SUPPORTED_SELECTABLE_ZONE_AF[] = "selectable-zone-af-values";
+const char CameraParameters::KEY_FACE_DETECTION[] = "face-detection";
+const char CameraParameters::KEY_SUPPORTED_FACE_DETECTION[] = "face-detection-values";
+const char CameraParameters::KEY_MEMORY_COLOR_ENHANCEMENT[] = "mce";
+const char CameraParameters::KEY_SUPPORTED_MEM_COLOR_ENHANCE_MODES[] = "mce-values";
+const char CameraParameters::KEY_VIDEO_HIGH_FRAME_RATE[] = "video-hfr";
+const char CameraParameters::KEY_SUPPORTED_VIDEO_HIGH_FRAME_RATE_MODES[] = "video-hfr-values";
+const char CameraParameters::KEY_REDEYE_REDUCTION[] = "redeye-reduction";
+const char CameraParameters::KEY_SUPPORTED_REDEYE_REDUCTION[] = "redeye-reduction-values";
+const char CameraParameters::KEY_HIGH_DYNAMIC_RANGE_IMAGING[] = "hdr";
+const char CameraParameters::KEY_SUPPORTED_HDR_IMAGING_MODES[] = "hdr-values";
+#endif
const char CameraParameters::KEY_VIDEO_SIZE[] = "video-size";
const char CameraParameters::KEY_SUPPORTED_VIDEO_SIZES[] = "video-size-values";
const char CameraParameters::KEY_PREFERRED_PREVIEW_SIZE_FOR_VIDEO[] = "preferred-preview-size-for-video";
@@ -88,8 +129,33 @@ const char CameraParameters::KEY_MAX_NUM_DETECTED_FACES_HW[] = "max-num-detected
const char CameraParameters::KEY_MAX_NUM_DETECTED_FACES_SW[] = "max-num-detected-faces-sw";
const char CameraParameters::KEY_RECORDING_HINT[] = "recording-hint";
const char CameraParameters::KEY_VIDEO_SNAPSHOT_SUPPORTED[] = "video-snapshot-supported";
+const char CameraParameters::KEY_FULL_VIDEO_SNAP_SUPPORTED[] = "full-video-snap-supported";
const char CameraParameters::KEY_VIDEO_STABILIZATION[] = "video-stabilization";
const char CameraParameters::KEY_VIDEO_STABILIZATION_SUPPORTED[] = "video-stabilization-supported";
+#ifdef QCOM_HARDWARE
+const char CameraParameters::KEY_ZSL[] = "zsl";
+const char CameraParameters::KEY_SUPPORTED_ZSL_MODES[] = "zsl-values";
+const char CameraParameters::KEY_CAMERA_MODE[] = "camera-mode";
+const char CameraParameters::KEY_POWER_MODE[] = "power-mode";
+const char CameraParameters::KEY_POWER_MODE_SUPPORTED[] = "power-mode-supported";
+#endif
+const char CameraParameters::KEY_AE_BRACKET_HDR[] = "ae-bracket-hdr";
+
+/*only effective when KEY_AE_BRACKET_HDR set to ae_bracketing*/
+//const char CameraParameters::KEY_AE_BRACKET_SETTING_KEY[] = "ae-bracket-setting";
+
+#if defined(HAVE_ISO) && !defined(QCOM_HARDWARE) // avoid redefining the QCOM_HARDWARE ISO keys above
+const char CameraParameters::KEY_SUPPORTED_ISO_MODES[] = "iso-values";
+const char CameraParameters::KEY_ISO_MODE[] = "iso";
+#endif
+
+#ifdef SAMSUNG_CAMERA_HARDWARE
+const char CameraParameters::KEY_ANTI_SHAKE_MODE[] = "anti-shake";
+const char CameraParameters::KEY_METERING[] = "metering";
+const char CameraParameters::KEY_WDR[] = "wdr";
+const char CameraParameters::KEY_WEATHER[] = "weather";
+const char CameraParameters::KEY_CITYID[] = "contextualtag-cityid";
+#endif
const char CameraParameters::TRUE[] = "true";
const char CameraParameters::FALSE[] = "false";
@@ -115,6 +181,15 @@ const char CameraParameters::EFFECT_POSTERIZE[] = "posterize";
const char CameraParameters::EFFECT_WHITEBOARD[] = "whiteboard";
const char CameraParameters::EFFECT_BLACKBOARD[] = "blackboard";
const char CameraParameters::EFFECT_AQUA[] = "aqua";
+#ifdef QCOM_HARDWARE
+const char CameraParameters::EFFECT_EMBOSS[] = "emboss";
+const char CameraParameters::EFFECT_SKETCH[] = "sketch";
+const char CameraParameters::EFFECT_NEON[] = "neon";
+
+// Values for auto exposure settings.
+const char CameraParameters::TOUCH_AF_AEC_OFF[] = "touch-off";
+const char CameraParameters::TOUCH_AF_AEC_ON[] = "touch-on";
+#endif
// Values for antibanding settings.
const char CameraParameters::ANTIBANDING_AUTO[] = "auto";
@@ -130,7 +205,8 @@ const char CameraParameters::FLASH_MODE_RED_EYE[] = "red-eye";
const char CameraParameters::FLASH_MODE_TORCH[] = "torch";
// Values for scene mode settings.
-const char CameraParameters::SCENE_MODE_AUTO[] = "auto";
+const char CameraParameters::SCENE_MODE_AUTO[] = "auto"; // corresponds to CAMERA_BESTSHOT_OFF in HAL
+const char CameraParameters::SCENE_MODE_ASD[] = "asd"; // corresponds to CAMERA_BESTSHOT_AUTO in HAL
const char CameraParameters::SCENE_MODE_ACTION[] = "action";
const char CameraParameters::SCENE_MODE_PORTRAIT[] = "portrait";
const char CameraParameters::SCENE_MODE_LANDSCAPE[] = "landscape";
@@ -145,11 +221,28 @@ const char CameraParameters::SCENE_MODE_FIREWORKS[] = "fireworks";
const char CameraParameters::SCENE_MODE_SPORTS[] = "sports";
const char CameraParameters::SCENE_MODE_PARTY[] = "party";
const char CameraParameters::SCENE_MODE_CANDLELIGHT[] = "candlelight";
+#ifdef QCOM_HARDWARE
+const char CameraParameters::SCENE_MODE_BACKLIGHT[] = "backlight";
+const char CameraParameters::SCENE_MODE_FLOWERS[] = "flowers";
+#endif
const char CameraParameters::SCENE_MODE_BARCODE[] = "barcode";
+
const char CameraParameters::SCENE_MODE_HDR[] = "hdr";
+#ifdef QCOM_HARDWARE
+const char CameraParameters::SCENE_MODE_AR[] = "AR";
+
+// Values for auto scene detection settings.
+const char CameraParameters::SCENE_DETECT_OFF[] = "off";
+const char CameraParameters::SCENE_DETECT_ON[] = "on";
+#endif
+
+// Formats for setPreviewFormat and setPictureFormat.
const char CameraParameters::PIXEL_FORMAT_YUV422SP[] = "yuv422sp";
const char CameraParameters::PIXEL_FORMAT_YUV420SP[] = "yuv420sp";
+#ifdef QCOM_HARDWARE
+const char CameraParameters::PIXEL_FORMAT_YUV420SP_ADRENO[] = "yuv420sp-adreno";
+#endif
const char CameraParameters::PIXEL_FORMAT_YUV422I[] = "yuv422i-yuyv";
const char CameraParameters::PIXEL_FORMAT_YUV420P[] = "yuv420p";
const char CameraParameters::PIXEL_FORMAT_RGB565[] = "rgb565";
@@ -158,6 +251,12 @@ const char CameraParameters::PIXEL_FORMAT_JPEG[] = "jpeg";
const char CameraParameters::PIXEL_FORMAT_BAYER_RGGB[] = "bayer-rggb";
const char CameraParameters::PIXEL_FORMAT_ANDROID_OPAQUE[] = "android-opaque";
+#ifdef QCOM_HARDWARE
+const char CameraParameters::PIXEL_FORMAT_RAW[] = "raw";
+const char CameraParameters::PIXEL_FORMAT_YV12[] = "yuv420p";
+const char CameraParameters::PIXEL_FORMAT_NV12[] = "nv12";
+#endif
+
// Values for focus mode settings.
const char CameraParameters::FOCUS_MODE_AUTO[] = "auto";
const char CameraParameters::FOCUS_MODE_INFINITY[] = "infinity";
@@ -166,6 +265,118 @@ const char CameraParameters::FOCUS_MODE_FIXED[] = "fixed";
const char CameraParameters::FOCUS_MODE_EDOF[] = "edof";
const char CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO[] = "continuous-video";
const char CameraParameters::FOCUS_MODE_CONTINUOUS_PICTURE[] = "continuous-picture";
+#if defined(QCOM_HARDWARE)
+const char CameraParameters::FOCUS_MODE_NORMAL[] = "normal";
+
+
+const char CameraParameters::KEY_SKIN_TONE_ENHANCEMENT[] = "skinToneEnhancement";
+const char CameraParameters::KEY_SUPPORTED_SKIN_TONE_ENHANCEMENT_MODES[] = "skinToneEnhancement-values";
+
+// Values for ISO Settings
+const char CameraParameters::ISO_AUTO[] = "auto";
+const char CameraParameters::ISO_HJR[] = "ISO_HJR";
+const char CameraParameters::ISO_100[] = "ISO100";
+const char CameraParameters::ISO_200[] = "ISO200";
+const char CameraParameters::ISO_400[] = "ISO400";
+const char CameraParameters::ISO_800[] = "ISO800";
+const char CameraParameters::ISO_1600[] = "ISO1600";
+const char CameraParameters::ISO_3200[] = "ISO3200";
+const char CameraParameters::ISO_6400[] = "ISO6400";
+
+ //Values for Lens Shading
+const char CameraParameters::LENSSHADE_ENABLE[] = "enable";
+const char CameraParameters::LENSSHADE_DISABLE[] = "disable";
+
+// Values for auto exposure settings.
+const char CameraParameters::AUTO_EXPOSURE_FRAME_AVG[] = "frame-average";
+const char CameraParameters::AUTO_EXPOSURE_CENTER_WEIGHTED[] = "center-weighted";
+const char CameraParameters::AUTO_EXPOSURE_SPOT_METERING[] = "spot-metering";
+
+const char CameraParameters::KEY_GPS_LATITUDE_REF[] = "gps-latitude-ref";
+const char CameraParameters::KEY_GPS_LONGITUDE_REF[] = "gps-longitude-ref";
+const char CameraParameters::KEY_GPS_ALTITUDE_REF[] = "gps-altitude-ref";
+const char CameraParameters::KEY_GPS_STATUS[] = "gps-status";
+const char CameraParameters::KEY_EXIF_DATETIME[] = "exif-datetime";
+
+const char CameraParameters::KEY_HISTOGRAM[] = "histogram";
+const char CameraParameters::KEY_SUPPORTED_HISTOGRAM_MODES[] = "histogram-values";
+//Values for Histogram Shading
+const char CameraParameters::HISTOGRAM_ENABLE[] = "enable";
+const char CameraParameters::HISTOGRAM_DISABLE[] = "disable";
+
+//Values for Skin Tone Enhancement Modes
+const char CameraParameters::SKIN_TONE_ENHANCEMENT_ENABLE[] = "enable";
+const char CameraParameters::SKIN_TONE_ENHANCEMENT_DISABLE[] = "disable";
+
+const char CameraParameters::KEY_SHARPNESS[] = "sharpness";
+const char CameraParameters::KEY_MAX_SHARPNESS[] = "max-sharpness";
+const char CameraParameters::KEY_CONTRAST[] = "contrast";
+const char CameraParameters::KEY_MAX_CONTRAST[] = "max-contrast";
+const char CameraParameters::KEY_SATURATION[] = "saturation";
+const char CameraParameters::KEY_MAX_SATURATION[] = "max-saturation";
+
+//Values for DENOISE
+const char CameraParameters::DENOISE_OFF[] = "denoise-off";
+const char CameraParameters::DENOISE_ON[] = "denoise-on";
+// Values for selectable zone af Settings
+const char CameraParameters::SELECTABLE_ZONE_AF_AUTO[] = "auto";
+const char CameraParameters::SELECTABLE_ZONE_AF_SPOT_METERING[] = "spot-metering";
+const char CameraParameters::SELECTABLE_ZONE_AF_CENTER_WEIGHTED[] = "center-weighted";
+const char CameraParameters::SELECTABLE_ZONE_AF_FRAME_AVERAGE[] = "frame-average";
+
+// Values for Face Detection settings.
+const char CameraParameters::FACE_DETECTION_OFF[] = "off";
+const char CameraParameters::FACE_DETECTION_ON[] = "on";
+
+// Values for MCE settings.
+const char CameraParameters::MCE_ENABLE[] = "enable";
+const char CameraParameters::MCE_DISABLE[] = "disable";
+
+// Values for HFR settings.
+const char CameraParameters::VIDEO_HFR_OFF[] = "off";
+const char CameraParameters::VIDEO_HFR_2X[] = "60";
+const char CameraParameters::VIDEO_HFR_3X[] = "90";
+const char CameraParameters::VIDEO_HFR_4X[] = "120";
+
+// Values for Redeye Reduction settings.
+const char CameraParameters::REDEYE_REDUCTION_ENABLE[] = "enable";
+const char CameraParameters::REDEYE_REDUCTION_DISABLE[] = "disable";
+// Values for HDR settings.
+const char CameraParameters::HDR_ENABLE[] = "enable";
+const char CameraParameters::HDR_DISABLE[] = "disable";
+
+// Values for ZSL settings.
+const char CameraParameters::ZSL_OFF[] = "off";
+const char CameraParameters::ZSL_ON[] = "on";
+
+// Values for HDR Bracketing settings.
+const char CameraParameters::AE_BRACKET_HDR_OFF[] = "Off";
+const char CameraParameters::AE_BRACKET_HDR[] = "HDR";
+const char CameraParameters::AE_BRACKET[] = "AE-Bracket";
+
+const char CameraParameters::LOW_POWER[] = "Low_Power";
+const char CameraParameters::NORMAL_POWER[] = "Normal_Power";
+
+static const char* portrait = "portrait";
+static const char* landscape = "landscape";
+
+int CameraParameters::getOrientation() const
+{
+ const char* orientation = get("orientation");
+ if (orientation && !strcmp(orientation, portrait))
+ return CAMERA_ORIENTATION_PORTRAIT;
+ return CAMERA_ORIENTATION_LANDSCAPE;
+}
+void CameraParameters::setOrientation(int orientation)
+{
+ if (orientation == CAMERA_ORIENTATION_PORTRAIT) {
+ set("orientation", portrait);
+ } else {
+ set("orientation", landscape);
+ }
+}
+#endif
+
CameraParameters::CameraParameters()
: mMap()
@@ -248,7 +459,7 @@ void CameraParameters::set(const char *key, const char *value)
void CameraParameters::set(const char *key, int value)
{
char str[16];
- sprintf(str, "%d", value);
+ snprintf(str, sizeof(str), "%d", value);
set(key, str);
}
@@ -275,6 +486,13 @@ int CameraParameters::getInt(const char *key) const
return strtol(v, 0, 0);
}
+#ifdef SAMSUNG_CAMERA_HARDWARE
+int CameraParameters::getInt64(const char *key) const
+{
+ return -1;
+}
+#endif
+
float CameraParameters::getFloat(const char *key) const
{
const char *v = get(key);
@@ -313,6 +531,32 @@ static int parse_pair(const char *str, int *first, int *second, char delim,
return 0;
}
+// Parse string like "(1, 2, 3, 4, ..., N)"
+// num is pointer to an allocated array of size N
+static int parseNDimVector(const char *str, int *num, int N, char delim = ',')
+{
+ char *start, *end;
+    if(num == NULL || str == NULL) {
+        ALOGE("Invalid input (str == NULL or num == NULL)");
+ return -1;
+ }
+ //check if string starts and ends with parantheses
+ if(str[0] != '(' || str[strlen(str)-1] != ')') {
+ ALOGE("Invalid format of string %s, valid format is (n1, n2, n3, n4 ...)", str);
+ return -1;
+ }
+ start = (char*) str;
+ start++;
+ for(int i=0; i<N; i++) {
+ *(num+i) = (int) strtol(start, &end, 10);
+ if(*end != delim && i < N-1) {
+            ALOGE("Cannot find delimiter '%c' in string \"%s\". end = %c", delim, str, *end);
+ return -1;
+ }
+ start = end+1;
+ }
+ return 0;
+}
static void parseSizesList(const char *sizesStr, Vector<Size> &sizes)
{
if (sizesStr == 0) {
@@ -341,7 +585,7 @@ static void parseSizesList(const char *sizesStr, Vector<Size> &sizes)
void CameraParameters::setPreviewSize(int width, int height)
{
char str[32];
- sprintf(str, "%dx%d", width, height);
+ snprintf(str, sizeof(str), "%dx%d", width, height);
set(KEY_PREVIEW_SIZE, str);
}
@@ -368,6 +612,21 @@ void CameraParameters::getSupportedPreviewSizes(Vector<Size> &sizes) const
parseSizesList(previewSizesStr, sizes);
}
+#ifdef QCOM_HARDWARE
+void CameraParameters::getSupportedHfrSizes(Vector<Size> &sizes) const
+{
+ const char *hfrSizesStr = get(KEY_SUPPORTED_HFR_SIZES);
+ parseSizesList(hfrSizesStr, sizes);
+}
+
+void CameraParameters::setPreviewFpsRange(int minFPS, int maxFPS)
+{
+ char str[32];
+ snprintf(str, sizeof(str), "%d,%d",minFPS,maxFPS);
+ set(KEY_PREVIEW_FPS_RANGE,str);
+}
+#endif
+
void CameraParameters::setVideoSize(int width, int height)
{
char str[32];
@@ -407,6 +666,18 @@ void CameraParameters::getPreviewFpsRange(int *min_fps, int *max_fps) const
parse_pair(p, min_fps, max_fps, ',');
}
+#ifdef QCOM_HARDWARE
+void CameraParameters::setPreviewFrameRateMode(const char *mode)
+{
+ set(KEY_PREVIEW_FRAME_RATE_MODE, mode);
+}
+
+const char *CameraParameters::getPreviewFrameRateMode() const
+{
+ return get(KEY_PREVIEW_FRAME_RATE_MODE);
+}
+#endif
+
void CameraParameters::setPreviewFormat(const char *format)
{
set(KEY_PREVIEW_FORMAT, format);
@@ -460,6 +731,71 @@ void CameraParameters::dump() const
}
}
+#ifdef QCOM_HARDWARE
+void CameraParameters::setTouchIndexAec(int x, int y)
+{
+ char str[32];
+ snprintf(str, sizeof(str), "%dx%d", x, y);
+ set(KEY_TOUCH_INDEX_AEC, str);
+}
+
+void CameraParameters::getTouchIndexAec(int *x, int *y) const
+{
+ *x = -1;
+ *y = -1;
+
+ // Get the current string, if it doesn't exist, leave the -1x-1
+ const char *p = get(KEY_TOUCH_INDEX_AEC);
+ if (p == 0)
+ return;
+
+ int tempX, tempY;
+ if (parse_pair(p, &tempX, &tempY, 'x') == 0) {
+ *x = tempX;
+ *y = tempY;
+ }
+}
+
+void CameraParameters::setTouchIndexAf(int x, int y)
+{
+ char str[32];
+ snprintf(str, sizeof(str), "%dx%d", x, y);
+ set(KEY_TOUCH_INDEX_AF, str);
+}
+
+void CameraParameters::getMeteringAreaCenter(int *x, int *y) const
+{
+ //Default invalid values
+ *x = -2000;
+ *y = -2000;
+
+ const char *p = get(KEY_METERING_AREAS);
+ if(p != NULL) {
+ int arr[5] = {-2000, -2000, -2000, -2000, 0};
+ parseNDimVector(p, arr, 5); //p = "(x1, y1, x2, y2, weight)"
+ *x = (arr[0] + arr[2])/2; //center_x = (x1+x2)/2
+ *y = (arr[1] + arr[3])/2; //center_y = (y1+y2)/2
+ }
+}
+
+void CameraParameters::getTouchIndexAf(int *x, int *y) const
+{
+ *x = -1;
+ *y = -1;
+
+ // Get the current string, if it doesn't exist, leave the -1x-1
+ const char *p = get(KEY_TOUCH_INDEX_AF);
+ if (p == 0)
+ return;
+
+ int tempX, tempY;
+ if (parse_pair(p, &tempX, &tempY, 'x') == 0) {
+ *x = tempX;
+ *y = tempY;
+ }
+}
+#endif
+
status_t CameraParameters::dump(int fd, const Vector<String16>& args) const
{
const size_t SIZE = 256;
diff --git a/include/camera/Camera.h b/include/camera/Camera.h
index 234e165..ad4db3b 100644
--- a/include/camera/Camera.h
+++ b/include/camera/Camera.h
@@ -46,6 +46,9 @@ struct CameraInfo {
* right of the screen, the value should be 270.
*/
int orientation;
+#ifdef QCOM_HARDWARE
+ int mode;
+#endif
};
class ICameraService;
diff --git a/include/camera/CameraParameters.h b/include/camera/CameraParameters.h
index 5540d32..21e907e 100644
--- a/include/camera/CameraParameters.h
+++ b/include/camera/CameraParameters.h
@@ -36,7 +36,21 @@ struct Size {
height = h;
}
};
-
+#ifdef QCOM_HARDWARE
+struct FPSRange{
+ int minFPS;
+ int maxFPS;
+
+ FPSRange(){
+ minFPS=0;
+ maxFPS=0;
+ };
+ FPSRange(int min,int max){
+ minFPS=min;
+ maxFPS=max;
+ };
+};
+#endif
class CameraParameters
{
public:
@@ -52,6 +66,9 @@ public:
void setFloat(const char *key, float value);
const char *get(const char *key) const;
int getInt(const char *key) const;
+#ifdef SAMSUNG_CAMERA_HARDWARE
+ int getInt64(const char *key) const;
+#endif
float getFloat(const char *key) const;
void remove(const char *key);
@@ -91,6 +108,10 @@ public:
void setPreviewFrameRate(int fps);
int getPreviewFrameRate() const;
void getPreviewFpsRange(int *min_fps, int *max_fps) const;
+#ifdef QCOM_HARDWARE
+ void setPreviewFrameRateMode(const char *mode);
+ const char *getPreviewFrameRateMode() const;
+#endif
void setPreviewFormat(const char *format);
const char *getPreviewFormat() const;
void setPictureSize(int width, int height);
@@ -98,6 +119,14 @@ public:
void getSupportedPictureSizes(Vector<Size> &sizes) const;
void setPictureFormat(const char *format);
const char *getPictureFormat() const;
+#ifdef QCOM_HARDWARE
+ void setTouchIndexAec(int x, int y);
+ void getTouchIndexAec(int *x, int *y) const;
+ void setTouchIndexAf(int x, int y);
+ void getTouchIndexAf(int *x, int *y) const;
+#endif
+
+ void getMeteringAreaCenter(int * x, int *y) const;
void dump() const;
status_t dump(int fd, const Vector<String16>& args) const;
@@ -112,6 +141,11 @@ public:
// Supported preview frame sizes in pixels.
// Example value: "800x600,480x320". Read only.
static const char KEY_SUPPORTED_PREVIEW_SIZES[];
+#ifdef QCOM_HARDWARE
+ // Supported PREVIEW/RECORDING SIZES IN HIGH FRAME RATE recording, sizes in pixels.
+ // Example value: "800x480,432x320". Read only.
+ static const char KEY_SUPPORTED_HFR_SIZES[];
+#endif
// The current minimum and maximum preview fps. This controls the rate of
// preview frames received (CAMERA_MSG_PREVIEW_FRAME). The minimum and
// maximum fps must be one of the elements from
@@ -141,6 +175,14 @@ public:
// Supported number of preview frames per second.
// Example value: "24,15,10". Read.
static const char KEY_SUPPORTED_PREVIEW_FRAME_RATES[];
+#ifdef QCOM_HARDWARE
+ // The mode of preview frame rate.
+ // Example value: "frame-rate-auto, frame-rate-fixed".
+ static const char KEY_PREVIEW_FRAME_RATE_MODE[];
+ static const char KEY_SUPPORTED_PREVIEW_FRAME_RATE_MODES[];
+ static const char KEY_PREVIEW_FRAME_RATE_AUTO_MODE[];
+ static const char KEY_PREVIEW_FRAME_RATE_FIXED_MODE[];
+#endif
// The dimensions for captured pictures in pixels (width x height).
// Example value: "1024x768". Read/write.
static const char KEY_PICTURE_SIZE[];
@@ -203,6 +245,12 @@ public:
// header.
// Example value: "21.0" or "-5". Write only.
static const char KEY_GPS_ALTITUDE[];
+
+#ifdef QCOM_HARDWARE
+ static const char KEY_SKIN_TONE_ENHANCEMENT[] ;
+ static const char KEY_SUPPORTED_SKIN_TONE_ENHANCEMENT_MODES[] ;
+#endif
+
// GPS timestamp (UTC in seconds since January 1, 1970). This should be
// stored in JPEG EXIF header.
// Example value: "1251192757". Write only.
@@ -222,6 +270,15 @@ public:
// Supported color effect settings.
// Example value: "none,mono,sepia". Read only.
static const char KEY_SUPPORTED_EFFECTS[];
+#ifdef QCOM_HARDWARE
+ //Touch Af/AEC settings.
+ static const char KEY_TOUCH_AF_AEC[];
+ static const char KEY_SUPPORTED_TOUCH_AF_AEC[];
+ //Touch Index for AEC.
+ static const char KEY_TOUCH_INDEX_AEC[];
+ //Touch Index for AF.
+ static const char KEY_TOUCH_INDEX_AF[];
+#endif
// Current antibanding setting.
// Example value: "auto" or ANTIBANDING_XXX constants. Read/write.
static const char KEY_ANTIBANDING[];
@@ -234,6 +291,14 @@ public:
// Supported scene mode settings.
// Example value: "auto,night,fireworks". Read only.
static const char KEY_SUPPORTED_SCENE_MODES[];
+#ifdef QCOM_HARDWARE
+ // Current auto scene detection mode.
+ // Example value: "off" or SCENE_DETECT_XXX constants. Read/write.
+ static const char KEY_SCENE_DETECT[];
+ // Supported auto scene detection settings.
+ // Example value: "off,backlight,snow/cloudy". Read only.
+ static const char KEY_SUPPORTED_SCENE_DETECT[];
+#endif
// Current flash mode.
// Example value: "auto" or FLASH_MODE_XXX constants. Read/write.
static const char KEY_FLASH_MODE[];
@@ -505,6 +570,23 @@ public:
// captured pictures.
// Example value: "true" or "false". Read only.
static const char KEY_VIDEO_SNAPSHOT_SUPPORTED[];
+ static const char KEY_FULL_VIDEO_SNAP_SUPPORTED[];
+
+#ifdef QCOM_HARDWARE
+ static const char KEY_ISO_MODE[];
+ static const char KEY_SUPPORTED_ISO_MODES[];
+ static const char KEY_LENSSHADE[] ;
+ static const char KEY_SUPPORTED_LENSSHADE_MODES[] ;
+
+ static const char KEY_AUTO_EXPOSURE[];
+ static const char KEY_SUPPORTED_AUTO_EXPOSURE[];
+
+ static const char KEY_GPS_LATITUDE_REF[];
+ static const char KEY_GPS_LONGITUDE_REF[];
+ static const char KEY_GPS_ALTITUDE_REF[];
+ static const char KEY_GPS_STATUS[];
+ static const char KEY_EXIF_DATETIME[];
+#endif
// The state of the video stabilization. If set to true, both the
// preview stream and the recorded video stream are stabilized by
@@ -520,11 +602,42 @@ public:
// has no effect on still image capture.
static const char KEY_VIDEO_STABILIZATION[];
+#ifdef QCOM_HARDWARE
+ static const char KEY_MEMORY_COLOR_ENHANCEMENT[];
+ static const char KEY_SUPPORTED_MEM_COLOR_ENHANCE_MODES[];
+
+ static const char KEY_POWER_MODE_SUPPORTED[];
+ static const char KEY_POWER_MODE[];
+
+ static const char KEY_ZSL[];
+ static const char KEY_SUPPORTED_ZSL_MODES[];
+
+ static const char KEY_CAMERA_MODE[];
+
+ static const char KEY_VIDEO_HIGH_FRAME_RATE[];
+ static const char KEY_SUPPORTED_VIDEO_HIGH_FRAME_RATE_MODES[];
+ static const char KEY_HIGH_DYNAMIC_RANGE_IMAGING[];
+ static const char KEY_SUPPORTED_HDR_IMAGING_MODES[];
+#endif
// Returns true if video stabilization is supported. That is, applications
// can set KEY_VIDEO_STABILIZATION to true and have a stabilized preview
// stream and record stabilized videos.
static const char KEY_VIDEO_STABILIZATION_SUPPORTED[];
+#if defined(HAVE_ISO) && !defined(QCOM_HARDWARE) // QCOM_HARDWARE already declares these ISO keys
+ static const char KEY_SUPPORTED_ISO_MODES[];
+ static const char KEY_ISO_MODE[];
+#endif
+
+#ifdef SAMSUNG_CAMERA_HARDWARE
+ static const char KEY_ANTI_SHAKE_MODE[];
+ static const char KEY_METERING[];
+ static const char KEY_WDR[];
+ static const char KEY_WEATHER[];
+ static const char KEY_CITYID[];
+#endif
+ static const char KEY_AE_BRACKET_HDR[];
+
// Value for KEY_ZOOM_SUPPORTED or KEY_SMOOTH_ZOOM_SUPPORTED.
static const char TRUE[];
static const char FALSE[];
@@ -532,6 +645,24 @@ public:
// Value for KEY_FOCUS_DISTANCES.
static const char FOCUS_DISTANCE_INFINITY[];
+#ifdef QCOM_HARDWARE
+ // DENOISE
+ static const char KEY_DENOISE[];
+ static const char KEY_SUPPORTED_DENOISE[];
+
+ //Selectable zone AF.
+ static const char KEY_SELECTABLE_ZONE_AF[];
+ static const char KEY_SUPPORTED_SELECTABLE_ZONE_AF[];
+
+ //Face Detection
+ static const char KEY_FACE_DETECTION[];
+ static const char KEY_SUPPORTED_FACE_DETECTION[];
+
+ //Redeye Reduction
+ static const char KEY_REDEYE_REDUCTION[];
+ static const char KEY_SUPPORTED_REDEYE_REDUCTION[];
+#endif
+
// Values for white balance settings.
static const char WHITE_BALANCE_AUTO[];
static const char WHITE_BALANCE_INCANDESCENT[];
@@ -552,6 +683,15 @@ public:
static const char EFFECT_WHITEBOARD[];
static const char EFFECT_BLACKBOARD[];
static const char EFFECT_AQUA[];
+#ifdef QCOM_HARDWARE
+ static const char EFFECT_EMBOSS[];
+ static const char EFFECT_SKETCH[];
+ static const char EFFECT_NEON[];
+
+ // Values for Touch AF/AEC
+ static const char TOUCH_AF_AEC_OFF[] ;
+ static const char TOUCH_AF_AEC_ON[] ;
+#endif
// Values for antibanding settings.
static const char ANTIBANDING_AUTO[];
@@ -576,6 +716,7 @@ public:
// Values for scene mode settings.
static const char SCENE_MODE_AUTO[];
+ static const char SCENE_MODE_ASD[];
static const char SCENE_MODE_ACTION[];
static const char SCENE_MODE_PORTRAIT[];
static const char SCENE_MODE_LANDSCAPE[];
@@ -590,6 +731,11 @@ public:
static const char SCENE_MODE_SPORTS[];
static const char SCENE_MODE_PARTY[];
static const char SCENE_MODE_CANDLELIGHT[];
+#ifdef QCOM_HARDWARE
+ static const char SCENE_MODE_BACKLIGHT[];
+ static const char SCENE_MODE_FLOWERS[];
+ static const char SCENE_MODE_AR[];
+#endif
// Applications are looking for a barcode. Camera driver will be optimized
// for barcode reading.
static const char SCENE_MODE_BARCODE[];
@@ -600,8 +746,15 @@ public:
// Pixel color formats for KEY_PREVIEW_FORMAT, KEY_PICTURE_FORMAT,
// and KEY_VIDEO_FRAME_FORMAT
+#ifdef QCOM_HARDWARE
+ static const char SCENE_DETECT_OFF[];
+ static const char SCENE_DETECT_ON[];
+#endif
static const char PIXEL_FORMAT_YUV422SP[];
static const char PIXEL_FORMAT_YUV420SP[]; // NV21
+#ifdef QCOM_HARDWARE
+ static const char PIXEL_FORMAT_YUV420SP_ADRENO[]; // ADRENO
+#endif
static const char PIXEL_FORMAT_YUV422I[]; // YUY2
static const char PIXEL_FORMAT_YUV420P[]; // YV12
static const char PIXEL_FORMAT_RGB565[];
@@ -610,9 +763,16 @@ public:
// Raw bayer format used for images, which is 10 bit precision samples
// stored in 16 bit words. The filter pattern is RGGB.
static const char PIXEL_FORMAT_BAYER_RGGB[];
+
// Pixel format is not known to the framework
static const char PIXEL_FORMAT_ANDROID_OPAQUE[];
+#ifdef QCOM_HARDWARE
+ static const char PIXEL_FORMAT_RAW[];
+    static const char PIXEL_FORMAT_YV12[]; // YV12
+ static const char PIXEL_FORMAT_NV12[]; //NV12
+#endif
+
// Values for focus mode settings.
// Auto-focus mode. Applications should call
// CameraHardwareInterface.autoFocus to start the focus in this mode.
@@ -664,6 +824,107 @@ public:
// other modes.
static const char FOCUS_MODE_CONTINUOUS_PICTURE[];
+#ifdef QCOM_HARDWARE
+ // Normal focus mode. Applications should call
+ // CameraHardwareInterface.autoFocus to start the focus in this mode.
+ static const char FOCUS_MODE_NORMAL[];
+ static const char ISO_AUTO[];
+ static const char ISO_HJR[] ;
+ static const char ISO_100[];
+ static const char ISO_200[] ;
+ static const char ISO_400[];
+ static const char ISO_800[];
+ static const char ISO_1600[];
+ static const char ISO_3200[];
+ static const char ISO_6400[];
+ // Values for Lens Shading
+ static const char LENSSHADE_ENABLE[] ;
+ static const char LENSSHADE_DISABLE[] ;
+
+ // Values for auto exposure settings.
+ static const char AUTO_EXPOSURE_FRAME_AVG[];
+ static const char AUTO_EXPOSURE_CENTER_WEIGHTED[];
+ static const char AUTO_EXPOSURE_SPOT_METERING[];
+
+ static const char KEY_SHARPNESS[];
+ static const char KEY_MAX_SHARPNESS[];
+ static const char KEY_CONTRAST[];
+ static const char KEY_MAX_CONTRAST[];
+ static const char KEY_SATURATION[];
+ static const char KEY_MAX_SATURATION[];
+
+ static const char KEY_HISTOGRAM[] ;
+ static const char KEY_SUPPORTED_HISTOGRAM_MODES[] ;
+ // Values for HISTOGRAM
+ static const char HISTOGRAM_ENABLE[] ;
+ static const char HISTOGRAM_DISABLE[] ;
+
+ // Values for SKIN TONE ENHANCEMENT
+ static const char SKIN_TONE_ENHANCEMENT_ENABLE[] ;
+ static const char SKIN_TONE_ENHANCEMENT_DISABLE[] ;
+
+ // Values for Denoise
+ static const char DENOISE_OFF[] ;
+ static const char DENOISE_ON[] ;
+
+ // Values for auto exposure settings.
+ static const char SELECTABLE_ZONE_AF_AUTO[];
+ static const char SELECTABLE_ZONE_AF_SPOT_METERING[];
+ static const char SELECTABLE_ZONE_AF_CENTER_WEIGHTED[];
+ static const char SELECTABLE_ZONE_AF_FRAME_AVERAGE[];
+
+ // Values for Face Detection settings.
+ static const char FACE_DETECTION_OFF[];
+ static const char FACE_DETECTION_ON[];
+
+ // Values for MCE settings.
+ static const char MCE_ENABLE[];
+ static const char MCE_DISABLE[];
+
+ // Values for ZSL settings.
+ static const char ZSL_OFF[];
+ static const char ZSL_ON[];
+
+ // Values for HDR Bracketing settings.
+ static const char AE_BRACKET_HDR_OFF[];
+ static const char AE_BRACKET_HDR[];
+ static const char AE_BRACKET[];
+
+ //POWER MODE
+ static const char LOW_POWER[];
+ static const char NORMAL_POWER[];
+
+ // Values for HFR settings.
+ static const char VIDEO_HFR_OFF[];
+ static const char VIDEO_HFR_2X[];
+ static const char VIDEO_HFR_3X[];
+ static const char VIDEO_HFR_4X[];
+
+ // Values for Redeye Reduction settings.
+ static const char REDEYE_REDUCTION_ENABLE[];
+ static const char REDEYE_REDUCTION_DISABLE[];
+ // Values for HDR settings.
+ static const char HDR_ENABLE[];
+ static const char HDR_DISABLE[];
+
+ // Values for Redeye Reduction settings.
+ // static const char REDEYE_REDUCTION_ENABLE[];
+ // static const char REDEYE_REDUCTION_DISABLE[];
+ // Values for HDR settings.
+ // static const char HDR_ENABLE[];
+ // static const char HDR_DISABLE[];
+
+ enum {
+ CAMERA_ORIENTATION_UNKNOWN = 0,
+ CAMERA_ORIENTATION_PORTRAIT = 1,
+ CAMERA_ORIENTATION_LANDSCAPE = 2,
+ };
+ int getOrientation() const;
+ void setOrientation(int orientation);
+ void setPreviewFpsRange(int minFPS,int maxFPS);
+ void getSupportedHfrSizes(Vector<Size> &sizes) const;
+#endif
+
private:
DefaultKeyedVector<String8,String8> mMap;
};
diff --git a/include/media/AudioParameter.h b/include/media/AudioParameter.h
index 891bc4b..d29c699 100644
--- a/include/media/AudioParameter.h
+++ b/include/media/AudioParameter.h
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2008-2011 The Android Open Source Project
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -48,6 +49,13 @@ public:
static const char * const keyFrameCount;
static const char * const keyInputSource;
static const char * const keyScreenState;
+#ifdef QCOM_HARDWARE
+ static const char * const keyHandleFm;
+ static const char * const keyVoipCheck;
+ static const char * const keyFluenceType;
+ static const char * const keySSR;
+ static const char * const keyHandleA2dpDevice;
+#endif
String8 toString();
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index 49e1afc..d8c57d3 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -1,4 +1,8 @@
/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Not a Contribution, Apache license notifications and license are retained
+ * for attribution purposes only.
+ *
* Copyright (C) 2008 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -144,6 +148,10 @@ public:
INPUT_CLOSED,
INPUT_CONFIG_CHANGED,
STREAM_CONFIG_CHANGED,
+#ifdef QCOM_HARDWARE
+ A2DP_OUTPUT_STATE,
+ EFFECT_CONFIG_CHANGED,
+#endif
NUM_CONFIG_EVENTS
};
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index 34108b3..77a0b26 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -31,6 +31,10 @@
#include <cutils/sched_policy.h>
#include <utils/threads.h>
+#ifdef QCOM_HARDWARE
+#include <media/IDirectTrackClient.h>
+#endif
+
namespace android {
// ----------------------------------------------------------------------------
@@ -39,7 +43,11 @@ class audio_track_cblk_t;
// ----------------------------------------------------------------------------
-class AudioTrack : virtual public RefBase
+class AudioTrack :
+#ifdef QCOM_HARDWARE
+ public BnDirectTrackClient,
+#endif
+ virtual public RefBase
{
public:
enum channel_index {
@@ -451,6 +459,11 @@ public:
*/
status_t dump(int fd, const Vector<String16>& args) const;
+#ifdef QCOM_HARDWARE
+ virtual void notify(int msg);
+ virtual status_t getTimeStamp(uint64_t *tstamp);
+#endif
+
protected:
/* copying audio tracks is not allowed */
AudioTrack(const AudioTrack& other);
@@ -496,6 +509,9 @@ protected:
status_t restoreTrack_l(audio_track_cblk_t*& cblk, bool fromStart);
bool stopped_l() const { return !mActive; }
+#ifdef QCOM_HARDWARE
+ sp<IDirectTrack> mDirectTrack;
+#endif
sp<IAudioTrack> mAudioTrack;
sp<IMemory> mCblkMemory;
sp<AudioTrackThread> mAudioTrackThread;
@@ -529,10 +545,17 @@ protected:
uint32_t mUpdatePeriod;
bool mFlushed; // FIXME will be made obsolete by making flush() synchronous
audio_output_flags_t mFlags;
+#ifdef QCOM_HARDWARE
+ sp<IAudioFlinger> mAudioFlinger;
+ audio_io_handle_t mAudioDirectOutput;
+#endif
int mSessionId;
int mAuxEffectId;
mutable Mutex mLock;
status_t mRestoreStatus;
+#ifdef QCOM_HARDWARE
+ void* mObserver;
+#endif
bool mIsTimed;
int mPreviousPriority; // before start()
SchedPolicy mPreviousSchedulingGroup;
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
index 5170a87..0700a68 100644..100755
--- a/include/media/IAudioFlinger.h
+++ b/include/media/IAudioFlinger.h
@@ -1,4 +1,8 @@
/*
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ * Not a Contribution, Apache license notifications and license are retained
+ * for attribution purposes only.
+ *
* Copyright (C) 2007 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -24,6 +28,10 @@
#include <utils/RefBase.h>
#include <utils/Errors.h>
#include <binder/IInterface.h>
+#ifdef QCOM_HARDWARE
+#include <media/IDirectTrack.h>
+#include <media/IDirectTrackClient.h>
+#endif
#include <media/IAudioTrack.h>
#include <media/IAudioRecord.h>
#include <media/IAudioFlingerClient.h>
@@ -69,6 +77,21 @@ public:
int *sessionId,
status_t *status) = 0;
+#ifdef QCOM_HARDWARE
+ /* create a direct audio track and registers it with AudioFlinger.
+ * return null if the track cannot be created.
+ */
+ virtual sp<IDirectTrack> createDirectTrack(
+ pid_t pid,
+ uint32_t sampleRate,
+ audio_channel_mask_t channelMask,
+ audio_io_handle_t output,
+ int *sessionId,
+ IDirectTrackClient* client,
+ audio_stream_type_t streamType,
+ status_t *status) = 0;
+#endif
+
virtual sp<IAudioRecord> openRecord(
pid_t pid,
audio_io_handle_t input,
@@ -187,6 +210,9 @@ public:
audio_io_handle_t dstOutput) = 0;
virtual audio_module_handle_t loadHwModule(const char *name) = 0;
+#ifdef QCOM_HARDWARE
+    virtual status_t deregisterClient(const sp<IAudioFlingerClient>& client) { return NO_ERROR; };
+#endif
// helpers for android.media.AudioManager.getProperty(), see description there for meaning
// FIXME move these APIs to AudioPolicy to permit a more accurate implementation
diff --git a/include/media/IDirectTrack.h b/include/media/IDirectTrack.h
new file mode 100644
index 0000000..c1f4f09
--- /dev/null
+++ b/include/media/IDirectTrack.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Not a Contribution, Apache license notifications and license are retained
+ * for attribution purposes only.
+ *
+ * Copyright (C) 2007 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_IDIRECTTRACK_H
+#define ANDROID_IDIRECTTRACK_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <utils/RefBase.h>
+#include <utils/Errors.h>
+#include <binder/IInterface.h>
+#include <binder/IMemory.h>
+
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+class IDirectTrack : public IInterface
+{
+public:
+ DECLARE_META_INTERFACE(DirectTrack);
+
+ /* After it's created the track is not active. Call start() to
+ * make it active. If set, the callback will start being called.
+ */
+ virtual status_t start() = 0;
+
+ /* Stop a track. If set, the callback will cease being called and
+ * obtainBuffer will return an error. Buffers that are already released
+ * will be processed, unless flush() is called.
+ */
+ virtual void stop() = 0;
+
+ /* flush a stopped track. All pending buffers are discarded.
+     * This function has no effect if the track is not stopped.
+ */
+ virtual void flush() = 0;
+
+    /* Mutes or unmutes this track.
+     * While muted, the callback, if set, is still called.
+ */
+ virtual void mute(bool) = 0;
+
+ /* Pause a track. If set, the callback will cease being called and
+ * obtainBuffer will return an error. Buffers that are already released
+ * will be processed, unless flush() is called.
+ */
+ virtual void pause() = 0;
+
+ /* set volume for both left and right channels.
+ */
+ virtual void setVolume(float l, float r) = 0;
+
+ virtual ssize_t write(const void*, size_t) = 0;
+
+ virtual int64_t getTimeStamp() = 0;
+};
+
+// ----------------------------------------------------------------------------
+
+class BnDirectTrack : public BnInterface<IDirectTrack>
+{
+public:
+ virtual status_t onTransact( uint32_t code,
+ const Parcel& data,
+ Parcel* reply,
+ uint32_t flags = 0);
+};
+
+// ----------------------------------------------------------------------------
+
+}; // namespace android
+
+#endif // ANDROID_IDIRECTTRACK_H
diff --git a/include/media/IDirectTrackClient.h b/include/media/IDirectTrackClient.h
new file mode 100644
index 0000000..9383690
--- /dev/null
+++ b/include/media/IDirectTrackClient.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Not a Contribution, Apache license notifications and license are retained
+ * for attribution purposes only.
+ *
+ * Copyright (C) 2007 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_IDIRECTTRACKCLIENT_H
+#define ANDROID_IDIRECTTRACKCLIENT_H
+
+#include <utils/RefBase.h>
+#include <binder/IInterface.h>
+#include <binder/Parcel.h>
+
+namespace android {
+
+class IDirectTrackClient: public IInterface
+{
+public:
+ DECLARE_META_INTERFACE(DirectTrackClient);
+
+ virtual void notify(int msg) = 0;
+};
+
+// ----------------------------------------------------------------------------
+
+class BnDirectTrackClient: public BnInterface<IDirectTrackClient>
+{
+public:
+ virtual status_t onTransact( uint32_t code,
+ const Parcel& data,
+ Parcel* reply,
+ uint32_t flags = 0);
+};
+
+}; // namespace android
+
+#endif // ANDROID_IDIRECTTRACKCLIENT_H
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index b7bee3f..a7570d6 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -110,6 +110,10 @@ public:
virtual status_t setPlaybackRatePermille(int32_t rate) { return INVALID_OPERATION; }
virtual bool needsTrailingPadding() { return true; }
+#ifdef QCOM_HARDWARE
+ virtual ssize_t sampleRate() const {return 0;};
+ virtual status_t getTimeStamp(uint64_t *tstamp) {return 0;};
+#endif
};
MediaPlayerBase() : mCookie(0), mNotify(0) {}
diff --git a/include/media/MediaProfiles.h b/include/media/MediaProfiles.h
index 9fc962c..0df9fd4 100644
--- a/include/media/MediaProfiles.h
+++ b/include/media/MediaProfiles.h
@@ -1,6 +1,7 @@
/*
**
** Copyright 2010, The Android Open Source Project.
+ ** Copyright (c) 2010 - 2012, The Linux Foundation. All rights reserved.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
@@ -32,8 +33,13 @@ enum camcorder_quality {
CAMCORDER_QUALITY_480P = 4,
CAMCORDER_QUALITY_720P = 5,
CAMCORDER_QUALITY_1080P = 6,
- CAMCORDER_QUALITY_QVGA = 7,
- CAMCORDER_QUALITY_LIST_END = 7,
+ CAMCORDER_QUALITY_QVGA = 11,
+ CAMCORDER_QUALITY_FWVGA = 7,
+ CAMCORDER_QUALITY_WVGA = 8,
+ CAMCORDER_QUALITY_VGA = 9,
+ CAMCORDER_QUALITY_WQVGA = 10,
+
+ CAMCORDER_QUALITY_LIST_END = 11,
CAMCORDER_QUALITY_TIME_LAPSE_LIST_START = 1000,
CAMCORDER_QUALITY_TIME_LAPSE_LOW = 1000,
@@ -456,6 +462,10 @@ private:
static VideoEncoderCap* createDefaultH263VideoEncoderCap();
static VideoEncoderCap* createDefaultM4vVideoEncoderCap();
static AudioEncoderCap* createDefaultAmrNBEncoderCap();
+#ifdef QCOM_HARDWARE
+ static AudioEncoderCap* createDefaultAacEncoderCap();
+ static AudioEncoderCap* createDefaultLpcmEncoderCap();
+#endif
static int findTagForName(const NameToTagMap *map, size_t nMappings, const char *name);
diff --git a/include/media/mediarecorder.h b/include/media/mediarecorder.h
index 6d304e0..6dfa5d9 100644
--- a/include/media/mediarecorder.h
+++ b/include/media/mediarecorder.h
@@ -1,5 +1,6 @@
/*
** Copyright (C) 2008 The Android Open Source Project
+ ** Copyright (c) 2010 - 2012, The Linux Foundation. All rights reserved.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
@@ -67,6 +68,12 @@ enum output_format {
/* H.264/AAC data encapsulated in MPEG2/TS */
OUTPUT_FORMAT_MPEG2TS = 8,
+#ifdef QCOM_HARDWARE
+ OUTPUT_FORMAT_QCP = 9, // QCP file format
+ OUTPUT_FORMAT_THREE_GPP2 = 10, /*3GPP2*/
+ OUTPUT_FORMAT_WAVE = 11, /*WAVE*/
+#endif
+
OUTPUT_FORMAT_LIST_END // must be last - used to validate format type
};
@@ -77,6 +84,11 @@ enum audio_encoder {
AUDIO_ENCODER_AAC = 3,
AUDIO_ENCODER_HE_AAC = 4,
AUDIO_ENCODER_AAC_ELD = 5,
+#ifdef QCOM_HARDWARE
+ AUDIO_ENCODER_EVRC = 6,
+ AUDIO_ENCODER_QCELP = 7,
+ AUDIO_ENCODER_LPCM = 8,
+#endif
AUDIO_ENCODER_LIST_END // must be the last - used to validate the audio encoder type
};
@@ -118,6 +130,9 @@ enum media_recorder_event_type {
MEDIA_RECORDER_EVENT_LIST_START = 1,
MEDIA_RECORDER_EVENT_ERROR = 1,
MEDIA_RECORDER_EVENT_INFO = 2,
+#ifdef QCOM_HARDWARE
+ MEDIA_RECORDER_MSG_COMPRESSED_IMAGE = 8, // mzhu: TODO, where to put this?
+#endif
MEDIA_RECORDER_EVENT_LIST_END = 99,
// Track related event types
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index df1c46b..b42a4ac 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -190,6 +190,9 @@ private:
status_t freeBuffer(OMX_U32 portIndex, size_t i);
status_t allocateOutputBuffersFromNativeWindow();
+#ifdef USE_SAMSUNG_COLORFORMAT
+ void setNativeWindowColorFormat(OMX_COLOR_FORMATTYPE &eNativeColorFormat);
+#endif
status_t cancelBufferToNativeWindow(BufferInfo *info);
status_t freeOutputBuffersNotOwnedByComponent();
BufferInfo *dequeueBufferFromNativeWindow();
diff --git a/include/media/stagefright/AudioPlayer.h b/include/media/stagefright/AudioPlayer.h
index 1dc408f..624fe3e 100644
--- a/include/media/stagefright/AudioPlayer.h
+++ b/include/media/stagefright/AudioPlayer.h
@@ -43,27 +43,27 @@ public:
virtual ~AudioPlayer();
// Caller retains ownership of "source".
- void setSource(const sp<MediaSource> &source);
+ virtual void setSource(const sp<MediaSource> &source);
// Return time in us.
virtual int64_t getRealTimeUs();
- status_t start(bool sourceAlreadyStarted = false);
+ virtual status_t start(bool sourceAlreadyStarted = false);
- void pause(bool playPendingSamples = false);
- void resume();
+ virtual void pause(bool playPendingSamples = false);
+ virtual void resume();
// Returns the timestamp of the last buffer played (in us).
- int64_t getMediaTimeUs();
+ virtual int64_t getMediaTimeUs();
// Returns true iff a mapping is established, i.e. the AudioPlayer
// has played at least one frame of audio.
- bool getMediaTimeMapping(int64_t *realtime_us, int64_t *mediatime_us);
+ virtual bool getMediaTimeMapping(int64_t *realtime_us, int64_t *mediatime_us);
- status_t seekTo(int64_t time_us);
+ virtual status_t seekTo(int64_t time_us);
- bool isSeeking();
- bool reachedEOS(status_t *finalStatus);
+ virtual bool isSeeking();
+ virtual bool reachedEOS(status_t *finalStatus);
status_t setPlaybackRatePermille(int32_t ratePermille);
@@ -91,6 +91,9 @@ private:
int64_t mSeekTimeUs;
bool mStarted;
+#ifdef QCOM_HARDWARE
+ bool mSourcePaused;
+#endif
bool mIsFirstBuffer;
status_t mFirstBufferResult;
diff --git a/include/media/stagefright/AudioSource.h b/include/media/stagefright/AudioSource.h
index 99f3c3b..4489254 100644
--- a/include/media/stagefright/AudioSource.h
+++ b/include/media/stagefright/AudioSource.h
@@ -85,6 +85,7 @@ private:
int64_t mInitialReadTimeUs;
int64_t mNumFramesReceived;
int64_t mNumClientOwnedBuffers;
+ int64_t mAutoRampStartUs;
List<MediaBuffer * > mBuffersReceived;
diff --git a/include/media/stagefright/ExtendedWriter.h b/include/media/stagefright/ExtendedWriter.h
new file mode 100644
index 0000000..23944b0
--- /dev/null
+++ b/include/media/stagefright/ExtendedWriter.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * Not a Contribution, Apache license notifications and license are retained
+ * for attribution purposes only.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef EXTENDED_WRITER_H_
+
+#define EXTENDED_WRITER_H_
+
+#include <stdio.h>
+
+#include <media/stagefright/MediaWriter.h>
+#include <utils/threads.h>
+#include <cutils/log.h>
+
+#define LITERAL_TO_STRING_INTERNAL(x) #x
+#define LITERAL_TO_STRING(x) LITERAL_TO_STRING_INTERNAL(x)
+
+#define CHECK_EQ(x,y) \
+ LOG_ALWAYS_FATAL_IF( \
+ (x) != (y), \
+ __FILE__ ":" LITERAL_TO_STRING(__LINE__) " " #x " != " #y)
+
+#define CHECK(x) \
+ LOG_ALWAYS_FATAL_IF( \
+ !(x), \
+ __FILE__ ":" LITERAL_TO_STRING(__LINE__) " " #x)
+
+namespace android {
+
+struct MediaSource;
+struct MetaData;
+
+struct ExtendedWriter : public MediaWriter {
+ ExtendedWriter(const char *filename);
+ ExtendedWriter(int fd);
+
+ status_t initCheck() const;
+
+ virtual status_t addSource(const sp<MediaSource> &source);
+ virtual bool reachedEOS();
+ virtual status_t start(MetaData *params = NULL);
+ virtual status_t stop();
+ virtual status_t pause();
+
+protected:
+ virtual ~ExtendedWriter();
+
+private:
+ FILE *mFile;
+ status_t mInitCheck;
+ sp<MediaSource> mSource;
+ bool mStarted;
+ volatile bool mPaused;
+ volatile bool mResumed;
+ volatile bool mDone;
+ volatile bool mReachedEOS;
+ pthread_t mThread;
+ int64_t mEstimatedSizeBytes;
+ int64_t mEstimatedDurationUs;
+
+ int32_t mFormat;
+
+ //QCP/EVRC header
+ struct QCPEVRCHeader
+ {
+ /* RIFF Section */
+ char riff[4];
+ unsigned int s_riff;
+ char qlcm[4];
+
+ /* Format chunk */
+ char fmt[4];
+ unsigned int s_fmt;
+ char mjr;
+ char mnr;
+ unsigned int data1;
+
+ /* UNIQUE ID of the codec */
+ unsigned short data2;
+ unsigned short data3;
+ char data4[8];
+ unsigned short ver;
+
+ /* Codec Info */
+ char name[80];
+ unsigned short abps;
+
+ /* average bits per sec of the codec */
+ unsigned short bytes_per_pkt;
+ unsigned short samp_per_block;
+ unsigned short samp_per_sec;
+ unsigned short bits_per_samp;
+ unsigned char vr_num_of_rates;
+
+ /* Rate Header fmt info */
+ unsigned char rvd1[3];
+ unsigned short vr_bytes_per_pkt[8];
+ unsigned int rvd2[5];
+
+ /* Vrat chunk */
+ unsigned char vrat[4];
+ unsigned int s_vrat;
+ unsigned int v_rate;
+ unsigned int size_in_pkts;
+
+ /* Data chunk */
+ unsigned char data[4];
+ unsigned int s_data;
+ } __attribute__ ((packed));
+
+ struct QCPEVRCHeader mHeader;
+ off_t mOffset; //note off_t
+
+ static void *ThreadWrapper(void *);
+ status_t threadFunc();
+ bool exceedsFileSizeLimit();
+ bool exceedsFileDurationLimit();
+
+ ExtendedWriter(const ExtendedWriter &);
+ ExtendedWriter &operator=(const ExtendedWriter &);
+
+ status_t writeQCPHeader( );
+ status_t writeEVRCHeader( );
+};
+
+} // namespace android
+
+#endif // EXTENDED_WRITER_H_
diff --git a/include/media/stagefright/FMRadioSource.h b/include/media/stagefright/FMRadioSource.h
new file mode 100644
index 0000000..32db156
--- /dev/null
+++ b/include/media/stagefright/FMRadioSource.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Author: Stefan Ekenberg (stefan.ekenberg@stericsson.com) for ST-Ericsson
+ */
+
+#ifndef FMRADIO_SOURCE_H_
+
+#define FMRADIO_SOURCE_H_
+
+#include <media/AudioRecord.h>
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/foundation/ABase.h>
+#include <system/audio.h>
+
+namespace android {
+
+class FMRadioSource : public DataSource {
+public:
+ FMRadioSource();
+
+ virtual status_t initCheck() const;
+ virtual ssize_t readAt(off64_t offset, void *data, size_t size);
+ virtual status_t getSize(off64_t *size);
+
+protected:
+ virtual ~FMRadioSource();
+
+private:
+ struct Buffer {
+ size_t frameCount;
+ size_t size;
+ int8_t* data;
+ };
+
+ status_t openRecord(int frameCount, audio_io_handle_t input);
+ status_t obtainBuffer(Buffer* audioBuffer);
+
+ status_t mInitCheck;
+ bool mStarted;
+ int mSessionId;
+ sp<IAudioRecord> mAudioRecord;
+ sp<IMemory> mCblkMemory;
+ audio_track_cblk_t* mCblk;
+
+ DISALLOW_EVIL_CONSTRUCTORS(FMRadioSource);
+};
+
+} // namespace android
+
+#endif // FMRADIO_SOURCE_H_
diff --git a/include/media/stagefright/LPAPlayer.h b/include/media/stagefright/LPAPlayer.h
new file mode 100755
index 0000000..b0e1d31
--- /dev/null
+++ b/include/media/stagefright/LPAPlayer.h
@@ -0,0 +1,291 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ * Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
+ * Not a Contribution, Apache license notifications and license are retained
+ * for attribution purposes only.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LPA_PLAYER_H_
+
+#define LPA_PLAYER_H_
+
+#include "AudioPlayer.h"
+#include <media/IAudioFlinger.h>
+#include <utils/threads.h>
+#include <utils/List.h>
+#include <utils/Vector.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <binder/IServiceManager.h>
+#include <linux/unistd.h>
+#include <include/TimedEventQueue.h>
+#include <binder/BinderService.h>
+#include <binder/MemoryDealer.h>
+#include <powermanager/IPowerManager.h>
+
+// Pause timeout = 3sec
+#define LPA_PAUSE_TIMEOUT_USEC 3000000
+
+namespace android {
+
+class LPAPlayer : public AudioPlayer {
+public:
+ enum {
+ REACHED_EOS,
+ SEEK_COMPLETE
+ };
+
+ enum {
+ TRACK_DIRECT,
+ TRACK_REGULAR,
+ TRACK_NONE
+ };
+
+ LPAPlayer(const sp<MediaPlayerBase::AudioSink> &audioSink, bool &initCheck,
+ AwesomePlayer *audioObserver = NULL);
+
+ virtual ~LPAPlayer();
+
+ // Caller retains ownership of "source".
+ virtual void setSource(const sp<MediaSource> &source);
+
+ // Return time in us.
+ virtual int64_t getRealTimeUs();
+
+ virtual status_t start(bool sourceAlreadyStarted = false);
+
+ virtual void pause(bool playPendingSamples = false);
+ virtual void resume();
+
+ // Returns the timestamp of the last buffer played (in us).
+ virtual int64_t getMediaTimeUs();
+
+ // Returns true iff a mapping is established, i.e. the LPAPlayer
+ // has played at least one frame of audio.
+ virtual bool getMediaTimeMapping(int64_t *realtime_us, int64_t *mediatime_us);
+
+ virtual status_t seekTo(int64_t time_us);
+
+ virtual bool isSeeking();
+ virtual bool reachedEOS(status_t *finalStatus);
+
+ static int objectsAlive;
+ static bool mLpaInProgress;
+private:
+ int64_t mPositionTimeMediaUs;
+ int64_t mPositionTimeRealUs;
+ bool mInternalSeeking;
+ bool mIsAudioRouted;
+ bool mStarted;
+ bool mPaused;
+ bool mA2DPEnabled;
+ int32_t mChannelMask;
+ int32_t numChannels;
+ int32_t mSampleRate;
+ int64_t mLatencyUs;
+ size_t mFrameSize;
+ int64_t mTimeStarted;
+ int64_t mTimePlayed;
+ int64_t mNumFramesPlayed;
+ int64_t mNumFramesPlayedSysTimeUs;
+ int64_t mNumA2DPBytesPlayed;
+
+ void clearPowerManager();
+
+ class PMDeathRecipient : public IBinder::DeathRecipient {
+ public:
+ PMDeathRecipient(void *obj){parentClass = (LPAPlayer *)obj;}
+ virtual ~PMDeathRecipient() {}
+
+ // IBinder::DeathRecipient
+ virtual void binderDied(const wp<IBinder>& who);
+
+ private:
+ LPAPlayer *parentClass;
+ PMDeathRecipient(const PMDeathRecipient&);
+ PMDeathRecipient& operator = (const PMDeathRecipient&);
+
+ friend class LPAPlayer;
+ };
+
+ friend class PMDeathRecipient;
+
+ void acquireWakeLock();
+ void releaseWakeLock();
+
+ sp<IPowerManager> mPowerManager;
+ sp<IBinder> mWakeLockToken;
+ sp<PMDeathRecipient> mDeathRecipient;
+
+ pthread_t decoderThread;
+
+ pthread_t A2DPNotificationThread;
+
+ //Kill Thread boolean
+ bool killDecoderThread;
+
+
+
+ bool killA2DPNotificationThread;
+
+ //Thread alive boolean
+ bool decoderThreadAlive;
+
+
+ bool a2dpNotificationThreadAlive;
+
+ //Declare the condition Variables and Mutex
+
+ pthread_mutex_t decoder_mutex;
+
+ pthread_mutex_t audio_sink_setup_mutex;
+
+ pthread_mutex_t a2dp_notification_mutex;
+
+
+
+ pthread_cond_t decoder_cv;
+
+
+ pthread_cond_t a2dp_notification_cv;
+
+
+ // make sure Decoder thread has exited
+ void requestAndWaitForDecoderThreadExit();
+
+
+ // make sure the Effects thread also exited
+ void requestAndWaitForA2DPNotificationThreadExit();
+
+ static void *decoderThreadWrapper(void *me);
+ void decoderThreadEntry();
+ static void *A2DPNotificationThreadWrapper(void *me);
+ void A2DPNotificationThreadEntry();
+
+ void createThreads();
+
+ volatile bool mIsA2DPEnabled;
+
+    // Structure to receive the BT notification from the flinger.
+ class AudioFlingerLPAdecodeClient: public IBinder::DeathRecipient, public BnAudioFlingerClient {
+ public:
+ AudioFlingerLPAdecodeClient(void *obj);
+
+ LPAPlayer *pBaseClass;
+ // DeathRecipient
+ virtual void binderDied(const wp<IBinder>& who);
+
+ // IAudioFlingerClient
+
+ // indicate a change in the configuration of an output or input: keeps the cached
+ // values for output/input parameters upto date in client process
+ virtual void ioConfigChanged(int event, audio_io_handle_t ioHandle, const void *param2);
+
+ friend class LPAPlayer;
+ };
+
+ sp<IAudioFlinger> mAudioFlinger;
+
+ // helper function to obtain AudioFlinger service handle
+ void getAudioFlinger();
+
+ void handleA2DPSwitch();
+ void onPauseTimeOut();
+
+ sp<AudioFlingerLPAdecodeClient> AudioFlingerClient;
+ friend class AudioFlingerLPAdecodeClient;
+ Mutex AudioFlingerLock;
+ sp<MediaSource> mSource;
+
+ MediaBuffer *mInputBuffer;
+
+ Mutex mLock;
+ Mutex mResumeLock;
+
+ bool mSeeking;
+ bool mReachedEOS;
+ bool mReachedOutputEOS;
+ status_t mFinalStatus;
+ int64_t mSeekTimeUs;
+ int64_t mPauseTime;
+
+
+ bool mIsFirstBuffer;
+ status_t mFirstBufferResult;
+ MediaBuffer *mFirstBuffer;
+ TimedEventQueue mQueue;
+ bool mQueueStarted;
+ sp<TimedEventQueue::Event> mPauseEvent;
+ bool mPauseEventPending;
+
+ sp<MediaPlayerBase::AudioSink> mAudioSink;
+ AwesomePlayer *mObserver;
+ int mTrackType;
+
+ static size_t AudioSinkCallback(
+ MediaPlayerBase::AudioSink *audioSink,
+ void *data, size_t size, void *me);
+
+ enum A2DPState {
+ A2DP_ENABLED,
+ A2DP_DISABLED,
+ A2DP_CONNECT,
+ A2DP_DISCONNECT
+ };
+
+ int64_t getTimeStamp(A2DPState state);
+
+ size_t fillBuffer(void *data, size_t size);
+
+ int64_t getRealTimeUsLocked();
+
+ void reset();
+
+ status_t setupAudioSink();
+ static size_t AudioCallback(
+ MediaPlayerBase::AudioSink *audioSink,
+ void *buffer, size_t size, void *cookie);
+ size_t AudioCallback(void *cookie, void *data, size_t size);
+
+ LPAPlayer(const LPAPlayer &);
+ LPAPlayer &operator=(const LPAPlayer &);
+};
+
+struct TimedEvent : public TimedEventQueue::Event {
+ TimedEvent(LPAPlayer *player,
+ void (LPAPlayer::*method)())
+ : mPlayer(player),
+ mMethod(method) {
+ }
+
+protected:
+ virtual ~TimedEvent() {}
+
+ virtual void fire(TimedEventQueue *queue, int64_t /* now_us */) {
+ (mPlayer->*mMethod)();
+ }
+
+private:
+ LPAPlayer *mPlayer;
+ void (LPAPlayer::*mMethod)();
+
+ TimedEvent(const TimedEvent &);
+ TimedEvent &operator=(const TimedEvent &);
+};
+
+} // namespace android
+
+#endif // LPA_PLAYER_H_
+
diff --git a/include/media/stagefright/MediaDebug.h b/include/media/stagefright/MediaDebug.h
new file mode 100644
index 0000000..bcaeeba
--- /dev/null
+++ b/include/media/stagefright/MediaDebug.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * Not a Contribution, Apache license notifications and license are retained
+ * for attribution purposes only
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_DEBUG_H_
+
+#define MEDIA_DEBUG_H_
+
+#include <cutils/log.h>
+
+#define LITERAL_TO_STRING_INTERNAL(x) #x
+#define LITERAL_TO_STRING(x) LITERAL_TO_STRING_INTERNAL(x)
+
+#define CHECK_EQ(x,y) \
+ LOG_ALWAYS_FATAL_IF( \
+ (x) != (y), \
+ __FILE__ ":" LITERAL_TO_STRING(__LINE__) " " #x " != " #y)
+
+#define CHECK(x) \
+ LOG_ALWAYS_FATAL_IF( \
+ !(x), \
+ __FILE__ ":" LITERAL_TO_STRING(__LINE__) " " #x)
+
+#endif // MEDIA_DEBUG_H_
diff --git a/include/media/stagefright/MediaDefs.h b/include/media/stagefright/MediaDefs.h
index 457d5d7..f63926c 100644
--- a/include/media/stagefright/MediaDefs.h
+++ b/include/media/stagefright/MediaDefs.h
@@ -31,6 +31,7 @@ extern const char *MEDIA_MIMETYPE_VIDEO_RAW;
extern const char *MEDIA_MIMETYPE_AUDIO_AMR_NB;
extern const char *MEDIA_MIMETYPE_AUDIO_AMR_WB;
+extern const char *MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS;
extern const char *MEDIA_MIMETYPE_AUDIO_MPEG; // layer III
extern const char *MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_I;
extern const char *MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II;
diff --git a/include/media/stagefright/MediaExtractor.h b/include/media/stagefright/MediaExtractor.h
index 94090ee..a458284 100644
--- a/include/media/stagefright/MediaExtractor.h
+++ b/include/media/stagefright/MediaExtractor.h
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2009 The Android Open Source Project
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -49,6 +50,7 @@ public:
CAN_SEEK_FORWARD = 2, // the "seek 10secs forward button"
CAN_PAUSE = 4,
CAN_SEEK = 8, // the "seek bar"
+ CAN_SEEK_TO_ZERO = 16, // the "previous button"
};
// If subclasses do _not_ override this, the default is
diff --git a/include/media/stagefright/OMXCodec.h b/include/media/stagefright/OMXCodec.h
index 583c3b3..bdd35a4 100644
--- a/include/media/stagefright/OMXCodec.h
+++ b/include/media/stagefright/OMXCodec.h
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2009 The Android Open Source Project
+ * Copyright (c) 2010 - 2012, The Linux Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -22,6 +23,9 @@
#include <media/IOMX.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaSource.h>
+#ifdef QCOM_HARDWARE
+#include <media/stagefright/QCOMXCodec.h>
+#endif
#include <utils/threads.h>
#include <OMX_Audio.h>
@@ -100,6 +104,13 @@ struct OMXCodec : public MediaSource,
kSupportsMultipleFramesPerInputBuffer = 1024,
kRequiresLargerEncoderOutputBuffer = 2048,
kOutputBuffersAreUnreadable = 4096,
+#ifdef QCOM_HARDWARE
+ kRequiresGlobalFlush = 0x20000000, // 2^29
+ kRequiresWMAProComponent = 0x40000000, //2^30
+#endif
+#if defined(OMAP_ENHANCEMENT)
+ kAvoidMemcopyInputRecordingFrames = 0x20000000,
+#endif
};
struct CodecNameAndQuirks {
@@ -127,6 +138,11 @@ private:
// Make sure mLock is accessible to OMXCodecObserver
friend class OMXCodecObserver;
+#ifdef QCOM_HARDWARE
+ // QCOMXCodec can access variables of OMXCodec
+ friend class QCOMXCodec;
+#endif
+
// Call this with mLock hold
void on_message(const omx_message &msg);
@@ -143,6 +159,9 @@ private:
};
enum {
+#ifdef QCOM_HARDWARE
+ kPortIndexBoth = -1,
+#endif
kPortIndexInput = 0,
kPortIndexOutput = 1
};
@@ -250,6 +269,11 @@ private:
void setG711Format(int32_t numChannels);
+#ifdef QCOM_HARDWARE
+ void setEVRCFormat( int32_t sampleRate, int32_t numChannels, int32_t bitRate);
+ void setQCELPFormat( int32_t sampleRate, int32_t numChannels, int32_t bitRate);
+#endif
+
status_t setVideoPortFormatType(
OMX_U32 portIndex,
OMX_VIDEO_CODINGTYPE compressionFormat,
@@ -291,6 +315,9 @@ private:
status_t allocateBuffers();
status_t allocateBuffersOnPort(OMX_U32 portIndex);
+#ifdef USE_SAMSUNG_COLORFORMAT
+ void setNativeWindowColorFormat(OMX_COLOR_FORMATTYPE &eNativeColorFormat);
+#endif
status_t allocateOutputBuffersFromNativeWindow();
status_t queueBufferToNativeWindow(BufferInfo *info);
@@ -344,6 +371,9 @@ private:
void dumpPortStatus(OMX_U32 portIndex);
status_t configureCodec(const sp<MetaData> &meta);
+#if defined(OMAP_ENHANCEMENT)
+ void restorePatchedDataPointer(BufferInfo *info);
+#endif
status_t applyRotation();
status_t waitForBufferFilled_l();
@@ -358,6 +388,11 @@ private:
OMXCodec(const OMXCodec &);
OMXCodec &operator=(const OMXCodec &);
+
+#ifdef QCOM_HARDWARE
+ status_t setWMAFormat(const sp<MetaData> &inputFormat);
+ void setAC3Format(int32_t numChannels, int32_t sampleRate);
+#endif
};
struct CodecCapabilities {
diff --git a/include/media/stagefright/QCOMXCodec.h b/include/media/stagefright/QCOMXCodec.h
new file mode 100644
index 0000000..485c187
--- /dev/null
+++ b/include/media/stagefright/QCOMXCodec.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef QC_OMX_CODEC_H_
+
+#define QC_OMX_CODEC_H_
+
+#include <android/native_window.h>
+#include <media/IOMX.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/foundation/AString.h>
+#include <utils/threads.h>
+
+#include <OMX_Audio.h>
+
+namespace android {
+
+struct MediaCodecList;
+struct OMXCodec;
+
+enum{
+ kRequiresWMAProComponent = 0x40000000,
+};
+
+
+struct QCOMXCodec {
+
+ static uint32_t getQCComponentQuirks(const MediaCodecList *list, size_t index);
+
+ static status_t configureDIVXCodec(const sp<MetaData> &meta, char* mime,
+ sp<IOMX> OMXhandle,IOMX::node_id nodeID, int port_index);
+
+ static status_t setQCFormat(const sp<MetaData> &meta, char* mime,
+ sp<IOMX> OMXhandle,IOMX::node_id nodeID,
+ OMXCodec *handle, bool isEncoder);
+
+ static status_t setWMAFormat(const sp<MetaData> &meta, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID, bool isEncoder );
+
+ static status_t setQCVideoInputFormat(const char *mime,
+ OMX_VIDEO_CODINGTYPE *compressionFormat);
+
+ static status_t setQCVideoOutputFormat(const char *mime,
+ OMX_VIDEO_CODINGTYPE *compressionFormat);
+
+ static status_t checkQCFormats(int format, AString* meta);
+
+ static void setASFQuirks(uint32_t quirks, const sp<MetaData> &meta,
+ const char* componentName);
+
+ static void checkAndAddRawFormat(OMXCodec *handle, const sp<MetaData> &meta);
+
+ static void setEVRCFormat(int32_t numChannels, int32_t sampleRate,
+ sp<IOMX> OMXhandle, IOMX::node_id nodeID,
+ OMXCodec *handle, bool isEncoder );
+
+ static void setQCELPFormat(int32_t numChannels, int32_t sampleRate,
+ sp<IOMX> OMXhandle, IOMX::node_id nodeID,
+ OMXCodec *handle, bool isEncoder );
+
+ static void setAC3Format(int32_t numChannels, int32_t sampleRate,
+ sp<IOMX> OMXhandle, IOMX::node_id nodeID);
+
+ static void checkQCRole(const sp<IOMX> &omx, IOMX::node_id node,
+ bool isEncoder,const char *mime);
+
+ static void setQCSpecificVideoFormat(const sp<MetaData> &meta, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID, char* componentName );
+
+};
+
+}
+#endif /*QC_OMX_CODEC_H_ */
+
diff --git a/include/media/stagefright/TunnelPlayer.h b/include/media/stagefright/TunnelPlayer.h
new file mode 100644
index 0000000..04cc750
--- /dev/null
+++ b/include/media/stagefright/TunnelPlayer.h
@@ -0,0 +1,250 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
+ * Not a Contribution, Apache license notifications and license are retained
+ * for attribution purposes only.
+ *
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TUNNEL_PLAYER_H_
+
+#define TUNNEL_PLAYER_H_
+
+#include "AudioPlayer.h"
+#include <media/IAudioFlinger.h>
+#include <utils/threads.h>
+#include <utils/List.h>
+#include <utils/Vector.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <binder/IServiceManager.h>
+#include <linux/unistd.h>
+#include <include/TimedEventQueue.h>
+#include <binder/BinderService.h>
+#include <binder/MemoryDealer.h>
+#include <powermanager/IPowerManager.h>
+
+// Pause timeout = 3sec
+#define TUNNEL_PAUSE_TIMEOUT_USEC 3000000
+namespace android {
+
+class TunnelPlayer : public AudioPlayer {
+public:
+ enum {
+ REACHED_EOS,
+ SEEK_COMPLETE
+ };
+
+ TunnelPlayer(const sp<MediaPlayerBase::AudioSink> &audioSink, bool &initCheck,
+ AwesomePlayer *audioObserver = NULL, bool hasVideo = false);
+
+ virtual ~TunnelPlayer();
+
+ // Caller retains ownership of "source".
+ virtual void setSource(const sp<MediaSource> &source);
+
+ // Return time in us.
+ virtual int64_t getRealTimeUs();
+
+ virtual status_t start(bool sourceAlreadyStarted = false);
+
+ virtual void pause(bool playPendingSamples = false);
+ virtual void resume();
+
+ // Returns the timestamp of the last buffer played (in us).
+ virtual int64_t getMediaTimeUs();
+
+ // Returns true iff a mapping is established, i.e. the TunnelPlayer
+ // has played at least one frame of audio.
+ virtual bool getMediaTimeMapping(int64_t *realtime_us, int64_t *mediatime_us);
+
+ virtual status_t seekTo(int64_t time_us);
+
+ virtual bool isSeeking();
+ virtual bool reachedEOS(status_t *finalStatus);
+
+
+ static int mTunnelObjectsAlive;
+private:
+ int64_t mPositionTimeMediaUs;
+ int64_t mPositionTimeRealUs;
+ bool mInternalSeeking;
+ bool mIsAudioRouted;
+ bool mStarted;
+ bool mPaused;
+ bool mA2DPEnabled;
+ int32_t mChannelMask;
+ int32_t numChannels;
+ int32_t mSampleRate;
+ int64_t mLatencyUs;
+ size_t mFrameSize;
+ int64_t mNumFramesPlayed;
+ int64_t mNumFramesPlayedSysTimeUs;
+ audio_format_t mFormat;
+ bool mHasVideo;
+ void clearPowerManager();
+
+ class PMDeathRecipient : public IBinder::DeathRecipient {
+ public:
+ PMDeathRecipient(void *obj){parentClass = (TunnelPlayer *)obj;}
+ virtual ~PMDeathRecipient() {}
+
+ // IBinder::DeathRecipient
+ virtual void binderDied(const wp<IBinder>& who);
+
+ private:
+ TunnelPlayer *parentClass;
+ PMDeathRecipient(const PMDeathRecipient&);
+ PMDeathRecipient& operator = (const PMDeathRecipient&);
+
+ friend class TunnelPlayer;
+ };
+
+ friend class PMDeathRecipient;
+
+ void acquireWakeLock();
+ void releaseWakeLock();
+
+ sp<IPowerManager> mPowerManager;
+ sp<IBinder> mWakeLockToken;
+ sp<PMDeathRecipient> mDeathRecipient;
+
+ pthread_t extractorThread;
+
+ //Kill Thread boolean
+ bool killExtractorThread;
+
+ //Thread alive boolean
+ bool extractorThreadAlive;
+
+
+ //Declare the condition Variables and Mutex
+
+ pthread_mutex_t extractor_mutex;
+ pthread_cond_t extractor_cv;
+
+
+    // make sure the extractor thread has exited
+ void requestAndWaitForExtractorThreadExit();
+
+
+ static void *extractorThreadWrapper(void *me);
+ void extractorThreadEntry();
+
+ void createThreads();
+
+ volatile bool mIsA2DPEnabled;
+
+    //Structure to receive the BT notification from the flinger.
+ class AudioFlingerTunneldecodeClient: public IBinder::DeathRecipient, public BnAudioFlingerClient {
+ public:
+ AudioFlingerTunneldecodeClient(void *obj);
+
+ TunnelPlayer *pBaseClass;
+ // DeathRecipient
+ virtual void binderDied(const wp<IBinder>& who);
+
+ // IAudioFlingerClient
+
+        // indicate a change in the configuration of an output or input: keeps the cached
+        // values for output/input parameters up to date in the client process
+ virtual void ioConfigChanged(int event, audio_io_handle_t ioHandle, const void *param2);
+
+ friend class TunnelPlayer;
+ };
+
+ sp<IAudioFlinger> mAudioFlinger;
+
+ // helper function to obtain AudioFlinger service handle
+ void getAudioFlinger();
+ void onPauseTimeOut();
+
+ sp<AudioFlingerTunneldecodeClient> mAudioFlingerClient;
+ friend class AudioFlingerTunneldecodeClient;
+ Mutex mAudioFlingerLock;
+ sp<MediaSource> mSource;
+
+ MediaBuffer *mInputBuffer;
+
+ Mutex pmLock;
+ Mutex mLock;
+
+ bool mSeeking;
+ bool mReachedEOS;
+ bool mReachedOutputEOS;
+ status_t mFinalStatus;
+ int64_t mSeekTimeUs;
+ int64_t mPauseTime;
+
+
+ bool mIsFirstBuffer;
+ status_t mFirstBufferResult;
+ MediaBuffer *mFirstBuffer;
+ TimedEventQueue mQueue;
+ bool mQueueStarted;
+ sp<TimedEventQueue::Event> mPauseEvent;
+ bool mPauseEventPending;
+
+ sp<MediaPlayerBase::AudioSink> mAudioSink;
+ AwesomePlayer *mObserver;
+
+ static size_t AudioSinkCallback(
+ MediaPlayerBase::AudioSink *audioSink,
+ void *data, size_t size, void *me);
+
+ enum A2DPState {
+ A2DP_ENABLED,
+ A2DP_DISABLED,
+ A2DP_CONNECT,
+ A2DP_DISCONNECT
+ };
+
+ void getPlayedTimeFromDSP_l(int64_t *timeStamp);
+ void getOffsetRealTime_l(int64_t *offsetTime);
+
+ size_t fillBuffer(void *data, size_t size);
+
+ void reset();
+
+ TunnelPlayer(const TunnelPlayer &);
+ TunnelPlayer &operator=(const TunnelPlayer &);
+};
+
+struct TunnelEvent : public TimedEventQueue::Event {
+ TunnelEvent(TunnelPlayer *player,
+ void (TunnelPlayer::*method)())
+ : mPlayer(player),
+ mMethod(method) {
+ }
+
+protected:
+ virtual ~TunnelEvent() {}
+
+ virtual void fire(TimedEventQueue *queue, int64_t /* now_us */) {
+ (mPlayer->*mMethod)();
+ }
+
+private:
+ TunnelPlayer *mPlayer;
+ void (TunnelPlayer::*mMethod)();
+
+ TunnelEvent(const TunnelEvent &);
+ TunnelEvent &operator=(const TunnelEvent &);
+};
+
+} // namespace android
+
+#endif // TUNNEL_PLAYER_H_
diff --git a/include/media/stagefright/WAVEWriter.h b/include/media/stagefright/WAVEWriter.h
new file mode 100644
index 0000000..766d8f4
--- /dev/null
+++ b/include/media/stagefright/WAVEWriter.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef WAVE_WRITER_H_
+
+#define WAVE_WRITER_H_
+
+#include <stdio.h>
+
+#include <media/stagefright/MediaWriter.h>
+#include <utils/threads.h>
+
+namespace android {
+
+
+#define ID_RIFF 0x46464952
+#define ID_WAVE 0x45564157
+#define ID_FMT 0x20746d66
+#define ID_DATA 0x61746164
+#define FORMAT_PCM 1
+
+
+struct MediaSource;
+struct MetaData;
+
+struct wav_header {
+ uint32_t riff_id;
+ uint32_t riff_sz;
+ uint32_t riff_fmt;
+ uint32_t fmt_id;
+ uint32_t fmt_sz;
+ uint16_t audio_format;
+ uint16_t num_channels;
+ uint32_t sample_rate;
+ uint32_t byte_rate; /* sample_rate * num_channels * bps / 8 */
+ uint16_t block_align; /* num_channels * bps / 8 */
+ uint16_t bits_per_sample;
+ uint32_t data_id;
+ uint32_t data_sz;
+};
+
+
+struct WAVEWriter : public MediaWriter {
+ WAVEWriter(const char *filename);
+ WAVEWriter(int fd);
+
+ status_t initCheck() const;
+
+ virtual status_t addSource(const sp<MediaSource> &source);
+ virtual bool reachedEOS();
+ virtual status_t start(MetaData *params = NULL);
+ virtual status_t stop();
+ virtual status_t pause();
+
+protected:
+ virtual ~WAVEWriter();
+
+private:
+ int mFd;
+ status_t mInitCheck;
+ sp<MediaSource> mSource;
+ bool mStarted;
+ volatile bool mPaused;
+ volatile bool mResumed;
+ volatile bool mDone;
+ volatile bool mReachedEOS;
+ pthread_t mThread;
+ int64_t mEstimatedSizeBytes;
+ int64_t mEstimatedDurationUs;
+
+ static void *ThreadWrapper(void *);
+ status_t threadFunc();
+ bool exceedsFileSizeLimit();
+ bool exceedsFileDurationLimit();
+
+ WAVEWriter(const WAVEWriter &);
+ WAVEWriter &operator=(const WAVEWriter &);
+};
+
+} // namespace android
+
+#endif // WAVE_WRITER_H_
diff --git a/libvideoeditor/lvpp/Android.mk b/libvideoeditor/lvpp/Android.mk
index 0ed7e6c..921f161 100755
--- a/libvideoeditor/lvpp/Android.mk
+++ b/libvideoeditor/lvpp/Android.mk
@@ -80,7 +80,6 @@ LOCAL_C_INCLUDES += \
$(TOP)/frameworks/av/libvideoeditor/lvpp \
$(TOP)/frameworks/av/services/audioflinger \
$(TOP)/frameworks/native/include/media/editor \
- $(TOP)/frameworks/native/include/media/openmax \
$(TOP)/frameworks/native/services/audioflinger
@@ -100,6 +99,12 @@ LOCAL_CFLAGS += -Wno-multichar \
-DUSE_STAGEFRIGHT_READERS \
-DUSE_STAGEFRIGHT_3GPP_READER
+ifneq ($(TI_CUSTOM_DOMX_PATH),)
+LOCAL_C_INCLUDES += $(TI_CUSTOM_DOMX_PATH)/omx_core/inc
+else
+LOCAL_C_INCLUDES += $(TOP)/frameworks/native/include/media/openmax
+endif
+
include $(BUILD_SHARED_LIBRARY)
#include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index 54666fb..6c37487 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -11,6 +11,17 @@ include $(BUILD_STATIC_LIBRARY)
include $(CLEAR_VARS)
+ifeq ($(BOARD_USES_QCOM_HARDWARE),true)
+LOCAL_SRC_FILES:= AudioParameter.cpp
+LOCAL_MODULE:= libaudioparameter
+LOCAL_MODULE_TAGS := optional
+LOCAL_SHARED_LIBRARIES := libutils
+
+include $(BUILD_SHARED_LIBRARY)
+
+include $(CLEAR_VARS)
+endif
+
LOCAL_SRC_FILES:= \
AudioTrack.cpp \
IAudioFlinger.cpp \
@@ -51,6 +62,25 @@ LOCAL_SRC_FILES:= \
SoundPool.cpp \
SoundPoolThread.cpp
+ifeq ($(BOARD_USES_LIBMEDIA_WITH_AUDIOPARAMETER),true)
+LOCAL_SRC_FILES+= \
+ AudioParameter.cpp
+endif
+
+ifeq ($(BOARD_USE_SAMSUNG_SEPARATEDSTREAM),true)
+LOCAL_CFLAGS += -DUSE_SAMSUNG_SEPARATEDSTREAM
+endif
+
+ifeq ($(BOARD_USES_QCOM_HARDWARE),true)
+LOCAL_SRC_FILES += \
+ IDirectTrack.cpp \
+ IDirectTrackClient.cpp
+
+ifeq ($(TARGET_QCOM_AUDIO_VARIANT),caf)
+LOCAL_CFLAGS += -DQCOM_ENHANCED_AUDIO
+endif
+endif
+
LOCAL_SHARED_LIBRARIES := \
libui libcutils libutils libbinder libsonivox libicuuc libexpat \
libcamera_client libstagefright_foundation \
diff --git a/media/libmedia/AudioParameter.cpp b/media/libmedia/AudioParameter.cpp
index e3fea77..fbb34f4 100644
--- a/media/libmedia/AudioParameter.cpp
+++ b/media/libmedia/AudioParameter.cpp
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2006-2011 The Android Open Source Project
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -32,6 +33,13 @@ const char * const AudioParameter::keyChannels = AUDIO_PARAMETER_STREAM_CHANNELS
const char * const AudioParameter::keyFrameCount = AUDIO_PARAMETER_STREAM_FRAME_COUNT;
const char * const AudioParameter::keyInputSource = AUDIO_PARAMETER_STREAM_INPUT_SOURCE;
const char * const AudioParameter::keyScreenState = AUDIO_PARAMETER_KEY_SCREEN_STATE;
+#ifdef QCOM_HARDWARE
+const char * const AudioParameter::keyHandleFm = AUDIO_PARAMETER_KEY_HANDLE_FM;
+const char * const AudioParameter::keyVoipCheck = AUDIO_PARAMETER_KEY_VOIP_CHECK;
+const char * const AudioParameter::keyFluenceType = AUDIO_PARAMETER_KEY_FLUENCE_TYPE;
+const char * const AudioParameter::keySSR = AUDIO_PARAMETER_KEY_SSR;
+const char * const AudioParameter::keyHandleA2dpDevice = AUDIO_PARAMETER_KEY_HANDLE_A2DP_DEVICE;
+#endif
AudioParameter::AudioParameter(const String8& keyValuePairs)
{
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 8ea6306..2725b5b 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -1,6 +1,7 @@
/*
**
** Copyright 2008, The Android Open Source Project
+** Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
@@ -62,7 +63,12 @@ status_t AudioRecord::getMinFrameCount(
// We double the size of input buffer for ping pong use of record buffer.
size <<= 1;
- if (audio_is_linear_pcm(format)) {
+#ifdef QCOM_ENHANCED_AUDIO
+ if (audio_is_linear_pcm(format) || format == AUDIO_FORMAT_AMR_WB)
+#else
+ if (audio_is_linear_pcm(format))
+#endif
+ {
int channelCount = popcount(channelMask);
size /= channelCount * audio_bytes_per_sample(format);
}
@@ -156,7 +162,11 @@ status_t AudioRecord::set(
return BAD_VALUE;
}
+#ifdef QCOM_HARDWARE
+ int channelCount = popcount((channelMask) & (AUDIO_CHANNEL_IN_STEREO | AUDIO_CHANNEL_IN_MONO | AUDIO_CHANNEL_IN_5POINT1));
+#else
int channelCount = popcount(channelMask);
+#endif
if (sessionId == 0 ) {
mSessionId = AudioSystem::newAudioSessionId();
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index 207f96f..fd8c320 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -561,6 +561,12 @@ audio_policy_dev_state_t AudioSystem::getDeviceConnectionState(audio_devices_t d
return aps->getDeviceConnectionState(device, device_address);
}
+extern "C" audio_policy_dev_state_t _ZN7android11AudioSystem24getDeviceConnectionStateE15audio_devices_tPKc(audio_devices_t device,
+ const char *device_address)
+{
+ return AudioSystem::getDeviceConnectionState(device, device_address);
+}
+
status_t AudioSystem::setPhoneState(audio_mode_t state)
{
if (uint32_t(state) >= AUDIO_MODE_CNT) return BAD_VALUE;
@@ -596,6 +602,14 @@ audio_io_handle_t AudioSystem::getOutput(audio_stream_type_t stream,
return aps->getOutput(stream, samplingRate, format, channelMask, flags);
}
+extern "C" audio_io_handle_t _ZN7android11AudioSystem9getOutputE19audio_stream_type_tjjj27audio_policy_output_flags_t(audio_stream_type_t stream,
+ uint32_t samplingRate,
+ uint32_t format,
+ uint32_t channels,
+ audio_output_flags_t flags) {
+ return AudioSystem::getOutput(stream,samplingRate,(audio_format_t) format, channels, flags);
+}
+
status_t AudioSystem::startOutput(audio_io_handle_t output,
audio_stream_type_t stream,
int session)
@@ -774,4 +788,13 @@ void AudioSystem::AudioPolicyServiceClient::binderDied(const wp<IBinder>& who) {
ALOGW("AudioPolicyService server died!");
}
+#ifdef USE_SAMSUNG_SEPARATEDSTREAM
+extern "C" bool _ZN7android11AudioSystem17isSeparatedStreamE19audio_stream_type_t(audio_stream_type_t stream)
+{
+ ALOGD("audio_stream_type_t: %d", stream);
+ ALOGD("isSeparatedStream: false");
+ return false;
+}
+#endif // USE_SAMSUNG_SEPARATEDSTREAM
+
}; // namespace android
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 362d022..36b1469 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -2,6 +2,10 @@
**
** Copyright 2007, The Android Open Source Project
**
+** Copyright (c) 2012, The Linux Foundation. All rights reserved.
+** Not a Contribution, Apache license notifications and license are retained
+** for attribution purposes only.
+
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
@@ -92,10 +96,15 @@ status_t AudioTrack::getMinFrameCount(
// ---------------------------------------------------------------------------
AudioTrack::AudioTrack()
- : mStatus(NO_INIT),
+ : mCblk(NULL),
+ mStatus(NO_INIT),
mIsTimed(false),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT)
+#ifdef QCOM_HARDWARE
+ ,mAudioFlinger(NULL),
+ mObserver(NULL)
+#endif
{
}
@@ -110,10 +119,15 @@ AudioTrack::AudioTrack(
void* user,
int notificationFrames,
int sessionId)
- : mStatus(NO_INIT),
+ : mCblk(NULL),
+ mStatus(NO_INIT),
mIsTimed(false),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT)
+#ifdef QCOM_HARDWARE
+ ,mAudioFlinger(NULL),
+ mObserver(NULL)
+#endif
{
mStatus = set(streamType, sampleRate, format, channelMask,
frameCount, flags, cbf, user, notificationFrames,
@@ -132,9 +146,14 @@ AudioTrack::AudioTrack(
void* user,
int notificationFrames,
int sessionId)
- : mStatus(NO_INIT),
+ : mCblk(NULL),
+ mStatus(NO_INIT),
mIsTimed(false),
mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT)
+#ifdef QCOM_HARDWARE
+ ,mAudioFlinger(NULL),
+ mObserver(NULL)
+#endif
{
mStatus = set((audio_stream_type_t)streamType, sampleRate, (audio_format_t)format,
(audio_channel_mask_t) channelMask,
@@ -153,10 +172,15 @@ AudioTrack::AudioTrack(
void* user,
int notificationFrames,
int sessionId)
- : mStatus(NO_INIT),
+ : mCblk(NULL),
+ mStatus(NO_INIT),
mIsTimed(false),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT)
+#ifdef QCOM_HARDWARE
+ ,mAudioFlinger(NULL),
+ mObserver(NULL)
+#endif
{
mStatus = set(streamType, sampleRate, format, channelMask,
0 /*frameCount*/, flags, cbf, user, notificationFrames,
@@ -177,9 +201,22 @@ AudioTrack::~AudioTrack()
mAudioTrackThread->requestExitAndWait();
mAudioTrackThread.clear();
}
+#ifdef QCOM_HARDWARE
+ if (mAudioTrack != 0) {
+ mAudioTrack.clear();
+ AudioSystem::releaseAudioSessionId(mSessionId);
+ }
+
+ if (mDirectTrack != 0) {
+ mDirectTrack.clear();
+ }
+#else
mAudioTrack.clear();
+#endif
IPCThreadState::self()->flushCommands();
+#ifndef QCOM_HARDWARE
AudioSystem::releaseAudioSessionId(mSessionId);
+#endif
}
}
@@ -252,12 +289,24 @@ status_t AudioTrack::set(
flags = (audio_output_flags_t)(flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
}
+#ifdef QCOM_ENHANCED_AUDIO
+ if ((streamType == AUDIO_STREAM_VOICE_CALL)
+ && (channelMask == AUDIO_CHANNEL_OUT_MONO)
+ && ((sampleRate == 8000 || sampleRate == 16000)))
+ {
+ ALOGD("Turn on Direct Output for VOIP RX");
+ flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_VOIP_RX|AUDIO_OUTPUT_FLAG_DIRECT);
+ }
+#endif
+
if (!audio_is_output_channel(channelMask)) {
ALOGE("Invalid channel mask %#x", channelMask);
return BAD_VALUE;
}
uint32_t channelCount = popcount(channelMask);
+ ALOGV("AudioTrack getOutput streamType %d, sampleRate %d, format %d, channelMask %d, flags %x",
+ streamType, sampleRate, format, channelMask, flags);
audio_io_handle_t output = AudioSystem::getOutput(
streamType,
sampleRate, format, channelMask,
@@ -278,46 +327,86 @@ status_t AudioTrack::set(
mFlags = flags;
mCbf = cbf;
- if (cbf != NULL) {
- mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
- mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
- }
-
- // create the IAudioTrack
- status_t status = createTrack_l(streamType,
- sampleRate,
- format,
- channelMask,
- frameCount,
- flags,
- sharedBuffer,
- output);
+#ifdef QCOM_HARDWARE
+ if (flags & AUDIO_OUTPUT_FLAG_LPA || flags & AUDIO_OUTPUT_FLAG_TUNNEL) {
+ ALOGV("Creating Direct Track");
+ const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
+ if (audioFlinger == 0) {
+ ALOGE("Could not get audioflinger");
+ return NO_INIT;
+ }
+ mAudioFlinger = audioFlinger;
+ status_t status = NO_ERROR;
+ mAudioDirectOutput = output;
+ mDirectTrack = audioFlinger->createDirectTrack( getpid(),
+ sampleRate,
+ channelMask,
+ mAudioDirectOutput,
+ &mSessionId,
+ this,
+ streamType,
+ &status);
+ if(status != NO_ERROR) {
+ ALOGE("createDirectTrack returned with status %d", status);
+ return status;
+ }
+ mAudioTrack = NULL;
+ mSharedBuffer = NULL;
+ }
+ else {
+#endif
+ if (cbf != NULL) {
+ mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
+ mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
+ }
+ // create the IAudioTrack
+ status_t status = createTrack_l(streamType,
+ sampleRate,
+ format,
+ channelMask,
+ frameCount,
+ flags,
+ sharedBuffer,
+ output);
- if (status != NO_ERROR) {
- if (mAudioTrackThread != 0) {
- mAudioTrackThread->requestExit();
- mAudioTrackThread.clear();
+ if (status != NO_ERROR) {
+ if (mAudioTrackThread != 0) {
+ mAudioTrackThread->requestExit();
+ mAudioTrackThread.clear();
+ }
+ return status;
}
- return status;
+#ifdef QCOM_HARDWARE
+ AudioSystem::acquireAudioSessionId(mSessionId);
+ mAudioDirectOutput = -1;
+ mDirectTrack = NULL;
+ mSharedBuffer = sharedBuffer;
}
-
+ mUserData = user;
+#endif
mStatus = NO_ERROR;
mStreamType = streamType;
mFormat = format;
mChannelMask = channelMask;
mChannelCount = channelCount;
- mSharedBuffer = sharedBuffer;
+
mMuted = false;
mActive = false;
- mUserData = user;
+
mLoopCount = 0;
mMarkerPosition = 0;
mMarkerReached = false;
mNewPosition = 0;
mUpdatePeriod = 0;
mFlushed = false;
+
+#ifndef QCOM_HARDWARE
+ mSharedBuffer = sharedBuffer;
+ mUserData = user;
AudioSystem::acquireAudioSessionId(mSessionId);
+#endif
+
mRestoreStatus = NO_ERROR;
return NO_ERROR;
}
@@ -331,6 +420,11 @@ status_t AudioTrack::initCheck() const
uint32_t AudioTrack::latency() const
{
+#ifdef QCOM_HARDWARE
+ if(mAudioDirectOutput != -1) {
+ return mAudioFlinger->latency(mAudioDirectOutput);
+ }
+#endif
return mLatency;
}
@@ -351,6 +445,11 @@ int AudioTrack::channelCount() const
uint32_t AudioTrack::frameCount() const
{
+#ifdef QCOM_HARDWARE
+    // Direct outputs bypass the shared-memory control block path below and
+    // ask AudioFlinger for the output's frame count instead.
+    if(mAudioDirectOutput != -1) {
+        return mAudioFlinger->frameCount(mAudioDirectOutput);
+    }
+#endif
    return mCblk->frameCount;
}
@@ -372,6 +471,16 @@ sp<IMemory>& AudioTrack::sharedBuffer()
void AudioTrack::start()
{
+#ifdef QCOM_HARDWARE
+ if (mDirectTrack != NULL) {
+ if(mActive == 0) {
+ mActive = 1;
+ mDirectTrack->start();
+ }
+ return;
+ }
+#endif
+
sp<AudioTrackThread> t = mAudioTrackThread;
ALOGV("start %p", this);
@@ -436,26 +545,35 @@ void AudioTrack::stop()
AutoMutex lock(mLock);
if (mActive) {
- mActive = false;
- mCblk->cv.signal();
- mAudioTrack->stop();
- // Cancel loops (If we are in the middle of a loop, playback
- // would not stop until loopCount reaches 0).
- setLoop_l(0, 0, 0);
- // the playback head position will reset to 0, so if a marker is set, we need
- // to activate it again
- mMarkerReached = false;
- // Force flush if a shared buffer is used otherwise audioflinger
- // will not stop before end of buffer is reached.
- if (mSharedBuffer != 0) {
- flush_l();
- }
- if (t != 0) {
- t->pause();
- } else {
- setpriority(PRIO_PROCESS, 0, mPreviousPriority);
- set_sched_policy(0, mPreviousSchedulingGroup);
+#ifdef QCOM_HARDWARE
+ if(mDirectTrack != NULL) {
+ mActive = false;
+ mDirectTrack->stop();
+ } else if (mAudioTrack != NULL) {
+#endif
+ mActive = false;
+ mCblk->cv.signal();
+ mAudioTrack->stop();
+ // Cancel loops (If we are in the middle of a loop, playback
+ // would not stop until loopCount reaches 0).
+ setLoop_l(0, 0, 0);
+ // the playback head position will reset to 0, so if a marker is set, we need
+ // to activate it again
+ mMarkerReached = false;
+ // Force flush if a shared buffer is used otherwise audioflinger
+ // will not stop before end of buffer is reached.
+ if (mSharedBuffer != 0) {
+ flush_l();
+ }
+ if (t != 0) {
+ t->pause();
+ } else {
+ setpriority(PRIO_PROCESS, 0, mPreviousPriority);
+ set_sched_policy(0, mPreviousSchedulingGroup);
+ }
+#ifdef QCOM_HARDWARE
}
+#endif
}
}
@@ -469,7 +587,12 @@ bool AudioTrack::stopped() const
void AudioTrack::flush()
{
AutoMutex lock(mLock);
- flush_l();
+#ifdef QCOM_HARDWARE
+ if(mDirectTrack != NULL) {
+ mDirectTrack->flush();
+ } else
+#endif
+ flush_l();
}
// must be called with mLock held
@@ -497,14 +620,28 @@ void AudioTrack::pause()
AutoMutex lock(mLock);
if (mActive) {
mActive = false;
- mCblk->cv.signal();
- mAudioTrack->pause();
+#ifdef QCOM_HARDWARE
+ if(mDirectTrack != NULL) {
+ ALOGV("mDirectTrack pause");
+ mDirectTrack->pause();
+ } else {
+#endif
+ mCblk->cv.signal();
+ mAudioTrack->pause();
+#ifdef QCOM_HARDWARE
+ }
+#endif
}
}
void AudioTrack::mute(bool e)
{
- mAudioTrack->mute(e);
+#ifdef QCOM_HARDWARE
+ if(mDirectTrack != NULL) {
+ mDirectTrack->mute(e);
+ } else
+#endif
+ mAudioTrack->mute(e);
mMuted = e;
}
@@ -522,8 +659,13 @@ status_t AudioTrack::setVolume(float left, float right)
AutoMutex lock(mLock);
mVolume[LEFT] = left;
mVolume[RIGHT] = right;
-
- mCblk->setVolumeLR((uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000));
+#ifdef QCOM_HARDWARE
+ if(mDirectTrack != NULL) {
+ ALOGV("mDirectTrack->setVolume(left = %f , right = %f)", left,right);
+ mDirectTrack->setVolume(left, right);
+ } else
+#endif
+ mCblk->setVolumeLR((uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000));
return NO_ERROR;
}
@@ -540,6 +682,11 @@ void AudioTrack::getVolume(float* left, float* right) const
status_t AudioTrack::setAuxEffectSendLevel(float level)
{
+#ifdef QCOM_HARDWARE
+ if (mDirectTrack != NULL) {
+ return NO_ERROR;
+ }
+#endif
ALOGV("setAuxEffectSendLevel(%f)", level);
if (level < 0.0f || level > 1.0f) {
return BAD_VALUE;
@@ -586,6 +733,11 @@ uint32_t AudioTrack::getSampleRate() const
}
AutoMutex lock(mLock);
+#ifdef QCOM_HARDWARE
+ if(mAudioDirectOutput != -1) {
+ return mAudioFlinger->sampleRate(mAudioDirectOutput);
+ }
+#endif
return mCblk->sampleRate;
}
@@ -733,6 +885,11 @@ int AudioTrack::getSessionId() const
return mSessionId;
}
+// ABI-compatibility shim: prebuilt binaries built against an older libmedia
+// link the NON-const mangled symbol _ZN7android10AudioTrack12getSessionIdEv
+// (AudioTrack::getSessionId() without const). Forward it to the current
+// const-qualified implementation so those prebuilts keep resolving.
+// NOTE(review): this relies on the implicit `this` argument surviving the
+// extern "C" call unchanged (calling-convention dependent) — fragile by
+// design; remove once no prebuilt depends on the old symbol.
+extern "C" int _ZNK7android10AudioTrack12getSessionIdEv();
+extern "C" int _ZN7android10AudioTrack12getSessionIdEv() {
+    return _ZNK7android10AudioTrack12getSessionIdEv();
+}
+
status_t AudioTrack::attachAuxEffect(int effectId)
{
ALOGV("attachAuxEffect(%d)", effectId);
@@ -1071,7 +1228,12 @@ void AudioTrack::releaseBuffer(Buffer* audioBuffer)
ssize_t AudioTrack::write(const void* buffer, size_t userSize)
{
-
+#ifdef QCOM_HARDWARE
+ if (mDirectTrack != NULL) {
+ mDirectTrack->write(buffer,userSize);
+ return userSize;
+ }
+#endif
if (mSharedBuffer != 0) return INVALID_OPERATION;
if (mIsTimed) return INVALID_OPERATION;
@@ -1441,7 +1603,8 @@ status_t AudioTrack::dump(int fd, const Vector<String16>& args) const
result.append(" AudioTrack::dump\n");
snprintf(buffer, 255, " stream type(%d), left - right volume(%f, %f)\n", mStreamType, mVolume[0], mVolume[1]);
result.append(buffer);
- snprintf(buffer, 255, " format(%d), channel count(%d), frame count(%d)\n", mFormat, mChannelCount, mCblk->frameCount);
+ snprintf(buffer, 255, " format(%d), channel count(%d), frame count(%d)\n", mFormat, mChannelCount,
+ (mCblk == 0) ? 0 : mCblk->frameCount);
result.append(buffer);
snprintf(buffer, 255, " sample rate(%d), status(%d), muted(%d)\n", (mCblk == 0) ? 0 : mCblk->sampleRate, mStatus, mMuted);
result.append(buffer);
@@ -1451,6 +1614,23 @@ status_t AudioTrack::dump(int fd, const Vector<String16>& args) const
return NO_ERROR;
}
+#ifdef QCOM_HARDWARE
+// IDirectTrackClient callback invoked by AudioFlinger for direct-output
+// (LPA/tunnel) tracks. Only underrun events are forwarded to the app
+// callback; all other messages are intentionally ignored here.
+void AudioTrack::notify(int msg) {
+    if (msg == EVENT_UNDERRUN) {
+        ALOGV("Posting event underrun to Audio Sink.");
+        mCbf(EVENT_UNDERRUN, mUserData, 0);
+    }
+}
+
+// Returns the rendered-audio timestamp of a direct-output (LPA/tunnel)
+// track via AudioFlinger. For non-direct tracks there is no timestamp
+// source on this path; 0 is reported.
+status_t AudioTrack::getTimeStamp(uint64_t *tstamp) {
+    if (tstamp == NULL) {
+        return BAD_VALUE;
+    }
+    // BUG FIX: the original left *tstamp untouched (uninitialized in the
+    // caller) when mDirectTrack was NULL, while still returning NO_ERROR.
+    *tstamp = 0;
+    if (mDirectTrack != NULL) {
+        *tstamp = mDirectTrack->getTimeStamp();
+        ALOGV("Timestamp %lld ", *tstamp);
+    }
+    return NO_ERROR;
+}
+#endif
+
// =========================================================================
AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index ce8ffc4..cc6a75c 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -1,6 +1,9 @@
/*
**
** Copyright 2007, The Android Open Source Project
+** Copyright (c) 2012, The Linux Foundation. All rights reserved.
+** Not a Contribution, Apache license notifications and license are retained
+** for attribution purposes only.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
@@ -73,6 +76,9 @@ enum {
LOAD_HW_MODULE,
GET_PRIMARY_OUTPUT_SAMPLING_RATE,
GET_PRIMARY_OUTPUT_FRAME_COUNT,
+#ifdef QCOM_HARDWARE
+ CREATE_DIRECT_TRACK
+#endif
};
class BpAudioFlinger : public BpInterface<IAudioFlinger>
@@ -132,6 +138,49 @@ public:
return track;
}
+#ifdef QCOM_HARDWARE
+    // Proxy for IAudioFlinger::createDirectTrack(): marshals the request to
+    // the remote AudioFlinger and returns the new IDirectTrack.
+    // NOTE: the parcel layout here must stay in sync with the
+    // CREATE_DIRECT_TRACK case in BnAudioFlinger::onTransact().
+    virtual sp<IDirectTrack> createDirectTrack(
+                                pid_t pid,
+                                uint32_t sampleRate,
+                                audio_channel_mask_t channelMask,
+                                audio_io_handle_t output,
+                                int *sessionId,
+                                IDirectTrackClient* client,
+                                audio_stream_type_t streamType,
+                                status_t *status)
+    {
+        Parcel data, reply;
+        sp<IDirectTrack> track;
+        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
+        data.writeInt32(pid);
+        data.writeInt32(sampleRate);
+        data.writeInt32(channelMask);
+        data.writeInt32((int32_t)output);
+        int lSessionId = 0;
+        if (sessionId != NULL) {
+            lSessionId = *sessionId;
+        }
+        data.writeInt32(lSessionId);
+        // BUG FIX: the original did data.write(client, sizeof(IDirectTrackClient)),
+        // copying a raw C++ object (including its vtable pointer) into the
+        // parcel; a pointer is meaningless in the server's address space.
+        // Binder interfaces must be passed as strong binders.
+        sp<IBinder> clientBinder;
+        if (client != NULL) {
+            clientBinder = client->asBinder();
+        }
+        data.writeStrongBinder(clientBinder);
+        data.writeInt32((int32_t) streamType);
+        status_t lStatus = remote()->transact(CREATE_DIRECT_TRACK, data, &reply);
+        if (lStatus != NO_ERROR) {
+            ALOGE("createDirectTrack error: %s", strerror(-lStatus));
+        } else {
+            // Reply layout: sessionId, status, track binder (in that order).
+            lSessionId = reply.readInt32();
+            if (sessionId != NULL) {
+                *sessionId = lSessionId;
+            }
+            lStatus = reply.readInt32();
+            track = interface_cast<IDirectTrack>(reply.readStrongBinder());
+        }
+        if (status) {
+            *status = lStatus;
+        }
+        return track;
+    }
+#endif
+
virtual sp<IAudioRecord> openRecord(
pid_t pid,
audio_io_handle_t input,
@@ -738,6 +787,26 @@ status_t BnAudioFlinger::onTransact(
reply->writeStrongBinder(track->asBinder());
return NO_ERROR;
} break;
+#ifdef QCOM_HARDWARE
+        case CREATE_DIRECT_TRACK: {
+            CHECK_INTERFACE(IAudioFlinger, data, reply);
+            // Must mirror the parcel layout written by
+            // BpAudioFlinger::createDirectTrack().
+            pid_t pid = data.readInt32();
+            uint32_t sampleRate = data.readInt32();
+            audio_channel_mask_t channelMask = data.readInt32();
+            audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
+            int sessionId = data.readInt32();
+            // BUG FIX: the original read sizeof(IDirectTrackClient) raw bytes
+            // into an UNINITIALIZED pointer — guaranteed undefined behavior.
+            // The client is a binder interface and must be reconstructed from
+            // the strong binder carried in the parcel.
+            sp<IDirectTrackClient> client =
+                interface_cast<IDirectTrackClient>(data.readStrongBinder());
+            int streamType = data.readInt32();
+            status_t status = NO_ERROR;
+            sp<IDirectTrack> track = createDirectTrack(pid,
+                    sampleRate, channelMask, output, &sessionId, client.get(),
+                    (audio_stream_type_t) streamType, &status);
+            reply->writeInt32(sessionId);
+            reply->writeInt32(status);
+            // BUG FIX: guard against a NULL track — the original called
+            // track->asBinder() unconditionally, crashing on failure.
+            sp<IBinder> trackBinder;
+            if (track != 0) {
+                trackBinder = track->asBinder();
+            }
+            reply->writeStrongBinder(trackBinder);
+            return NO_ERROR;
+        } break;
+#endif
case OPEN_RECORD: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
pid_t pid = data.readInt32();
diff --git a/media/libmedia/IAudioFlingerClient.cpp b/media/libmedia/IAudioFlingerClient.cpp
index 4178b29..e289703 100644
--- a/media/libmedia/IAudioFlingerClient.cpp
+++ b/media/libmedia/IAudioFlingerClient.cpp
@@ -1,4 +1,8 @@
/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ * Not a Contribution, Apache license notifications and license are retained
+ * for attribution purposes only.
+ *
* Copyright (C) 2009 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -49,7 +53,11 @@ public:
uint32_t stream = *(const uint32_t *)param2;
ALOGV("ioConfigChanged stream %d", stream);
data.writeInt32(stream);
- } else if (event != AudioSystem::OUTPUT_CLOSED && event != AudioSystem::INPUT_CLOSED) {
+ } else if (event != AudioSystem::OUTPUT_CLOSED &&
+#ifdef QCOM_HARDWARE
+ event != AudioSystem::EFFECT_CONFIG_CHANGED &&
+#endif
+ event != AudioSystem::INPUT_CLOSED) {
const AudioSystem::OutputDescriptor *desc = (const AudioSystem::OutputDescriptor *)param2;
data.writeInt32(desc->samplingRate);
data.writeInt32(desc->format);
diff --git a/media/libmedia/IDirectTrack.cpp b/media/libmedia/IDirectTrack.cpp
new file mode 100644
index 0000000..480761f
--- /dev/null
+++ b/media/libmedia/IDirectTrack.cpp
@@ -0,0 +1,178 @@
+/*
+** Copyright (c) 2012, The Linux Foundation. All rights reserved.
+** Not a Contribution, Apache license notifications and license are retained
+** for attribution purposes only.
+**
+** Copyright 2007, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#define LOG_TAG "IDirectTrack"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <binder/Parcel.h>
+
+#include <media/IDirectTrack.h>
+
+namespace android {
+
+enum {
+ START = IBinder::FIRST_CALL_TRANSACTION,
+ STOP,
+ FLUSH,
+ MUTE,
+ PAUSE,
+ SET_VOLUME,
+ WRITE,
+ GET_TIMESTAMP
+};
+
+// Client-side proxy for IDirectTrack. Each method marshals its arguments
+// into a Parcel and forwards them to the remote BnDirectTrack; the parcel
+// layout must stay in sync with BnDirectTrack::onTransact() below.
+class BpDirectTrack : public BpInterface<IDirectTrack>
+{
+public:
+    BpDirectTrack(const sp<IBinder>& impl)
+        : BpInterface<IDirectTrack>(impl)
+    {
+    }
+
+    virtual status_t start()
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IDirectTrack::getInterfaceDescriptor());
+        status_t status = remote()->transact(START, data, &reply);
+        if (status == NO_ERROR) {
+            status = reply.readInt32();
+        } else {
+            ALOGW("start() error: %s", strerror(-status));
+        }
+        return status;
+    }
+
+    virtual void stop()
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IDirectTrack::getInterfaceDescriptor());
+        remote()->transact(STOP, data, &reply);
+    }
+
+    virtual void flush()
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IDirectTrack::getInterfaceDescriptor());
+        remote()->transact(FLUSH, data, &reply);
+    }
+
+    virtual void mute(bool e)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IDirectTrack::getInterfaceDescriptor());
+        data.writeInt32(e);
+        remote()->transact(MUTE, data, &reply);
+    }
+
+    virtual void pause()
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IDirectTrack::getInterfaceDescriptor());
+        remote()->transact(PAUSE, data, &reply);
+    }
+
+    virtual void setVolume(float left, float right)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IDirectTrack::getInterfaceDescriptor());
+        // BUG FIX: the original never wrote the volumes into the parcel, so
+        // the server side always applied its own defaults.
+        data.writeFloat(left);
+        data.writeFloat(right);
+        remote()->transact(SET_VOLUME, data, &reply);
+    }
+
+    virtual ssize_t write(const void* buffer, size_t bytes)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IDirectTrack::getInterfaceDescriptor());
+        // BUG FIX: the original sent an empty parcel and returned the
+        // transact() status code as the byte count. Marshal the payload and
+        // read the real result from the reply.
+        data.writeInt32(bytes);
+        data.write(buffer, bytes);
+        status_t status = remote()->transact(WRITE, data, &reply);
+        if (status != NO_ERROR) {
+            return (ssize_t)status;
+        }
+        return (ssize_t)reply.readInt32();
+    }
+
+    virtual int64_t getTimeStamp() {
+        Parcel data, reply;
+        data.writeInterfaceToken(IDirectTrack::getInterfaceDescriptor());
+        // BUG FIX: the original returned the transact() status (a 32-bit
+        // error code) as the timestamp. Read the value from the reply.
+        status_t status = remote()->transact(GET_TIMESTAMP, data, &reply);
+        if (status != NO_ERROR) {
+            return 0;
+        }
+        return reply.readInt64();
+    }
+};
+
+IMPLEMENT_META_INTERFACE(DirectTrack, "android.media.IDirectTrack");
+
+// ----------------------------------------------------------------------
+
+// Server-side dispatch for IDirectTrack; each case must mirror the parcel
+// layout written by the corresponding BpDirectTrack method above.
+status_t BnDirectTrack::onTransact(
+    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+{
+    switch(code) {
+        case START: {
+            CHECK_INTERFACE(IDirectTrack, data, reply);
+            reply->writeInt32(start());
+            return NO_ERROR;
+        } break;
+        case STOP: {
+            CHECK_INTERFACE(IDirectTrack, data, reply);
+            stop();
+            return NO_ERROR;
+        } break;
+        case FLUSH: {
+            CHECK_INTERFACE(IDirectTrack, data, reply);
+            flush();
+            return NO_ERROR;
+        } break;
+        case MUTE: {
+            CHECK_INTERFACE(IDirectTrack, data, reply);
+            mute( data.readInt32() );
+            return NO_ERROR;
+        } break;
+        case PAUSE: {
+            CHECK_INTERFACE(IDirectTrack, data, reply);
+            pause();
+            return NO_ERROR;
+        } break;
+        case SET_VOLUME: {
+            CHECK_INTERFACE(IDirectTrack, data, reply);
+            // BUG FIX: the original ignored the parcel and always applied
+            // hard-coded 1.0/1.0; read the volumes the proxy sends.
+            float left = data.readFloat();
+            float right = data.readFloat();
+            setVolume(left, right);
+            return NO_ERROR;
+        } break;
+        case WRITE: {
+            CHECK_INTERFACE(IDirectTrack, data, reply);
+            // BUG FIX: the original reinterpreted a parcel int32 as a
+            // pointer into the *client's* address space. Read the payload
+            // that BpDirectTrack::write() marshals in-line instead.
+            size_t bytes = (size_t)data.readInt32();
+            const void *buffer = data.readInplace(bytes);
+            ssize_t bytesWritten =
+                (buffer != NULL) ? write(buffer, bytes) : (ssize_t)BAD_VALUE;
+            reply->writeInt32(bytesWritten);
+            return NO_ERROR;
+        } break;
+        case GET_TIMESTAMP: {
+            CHECK_INTERFACE(IDirectTrack, data, reply);
+            // BUG FIX: writeInt32 truncated the 64-bit timestamp.
+            int64_t time = getTimeStamp();
+            reply->writeInt64(time);
+            return NO_ERROR;
+        } break;
+        default:
+            return BBinder::onTransact(code, data, reply, flags);
+    }
+}
+
+}; // namespace android
+
diff --git a/media/libmedia/IDirectTrackClient.cpp b/media/libmedia/IDirectTrackClient.cpp
new file mode 100644
index 0000000..86a47ec
--- /dev/null
+++ b/media/libmedia/IDirectTrackClient.cpp
@@ -0,0 +1,69 @@
+/*
+** Copyright (c) 2012, The Linux Foundation. All rights reserved.
+** Not a Contribution, Apache license notifications and license are retained
+** for attribution purposes only.
+**
+** Copyright 2007, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#include <utils/RefBase.h>
+#include <binder/IInterface.h>
+#include <binder/Parcel.h>
+
+#include <media/IDirectTrackClient.h>
+
+namespace android {
+
+enum {
+ NOTIFY = IBinder::FIRST_CALL_TRANSACTION,
+};
+
+// Client-side proxy for IDirectTrackClient: delivers asynchronous event
+// notifications (e.g. underrun) from AudioFlinger back to the track owner.
+class BpDirectTrackClient: public BpInterface<IDirectTrackClient>
+{
+public:
+    BpDirectTrackClient(const sp<IBinder>& impl)
+        : BpInterface<IDirectTrackClient>(impl)
+    {
+    }
+
+    virtual void notify(int msg)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IDirectTrackClient::getInterfaceDescriptor());
+        data.writeInt32(msg);
+        // One-way transaction: the notification must not block the sender
+        // waiting for the client to process it.
+        remote()->transact(NOTIFY, data, &reply, IBinder::FLAG_ONEWAY);
+    }
+};
+
+IMPLEMENT_META_INTERFACE(DirectTrackClient, "android.media.IDirectTrackClient");
+
+// ----------------------------------------------------------------------
+
+// Server-side dispatch for IDirectTrackClient; must mirror the parcel
+// layout written by BpDirectTrackClient::notify() above.
+status_t BnDirectTrackClient::onTransact(
+    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+{
+    switch (code) {
+        case NOTIFY: {
+            CHECK_INTERFACE(IDirectTrackClient, data, reply);
+            int msg = data.readInt32();
+            notify(msg);
+            return NO_ERROR;
+        } break;
+        default:
+            return BBinder::onTransact(code, data, reply, flags);
+    }
+}
+
+}; // namespace android
diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp
index 8319cd7..fa536a6 100644
--- a/media/libmedia/MediaProfiles.cpp
+++ b/media/libmedia/MediaProfiles.cpp
@@ -1,6 +1,7 @@
/*
**
** Copyright 2010, The Android Open Source Project
+** Copyright (c) 2010 - 2012, The Linux Foundation. All rights reserved.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
@@ -45,7 +46,10 @@ const MediaProfiles::NameToTagMap MediaProfiles::sAudioEncoderNameMap[] = {
{"amrwb", AUDIO_ENCODER_AMR_WB},
{"aac", AUDIO_ENCODER_AAC},
{"heaac", AUDIO_ENCODER_HE_AAC},
- {"aaceld", AUDIO_ENCODER_AAC_ELD}
+ {"aaceld", AUDIO_ENCODER_AAC_ELD},
+#ifdef QCOM_HARDWARE
+ {"lpcm", AUDIO_ENCODER_LPCM},
+#endif
};
const MediaProfiles::NameToTagMap MediaProfiles::sFileFormatMap[] = {
@@ -70,6 +74,10 @@ const MediaProfiles::NameToTagMap MediaProfiles::sCamcorderQualityNameMap[] = {
{"720p", CAMCORDER_QUALITY_720P},
{"1080p", CAMCORDER_QUALITY_1080P},
{"qvga", CAMCORDER_QUALITY_QVGA},
+ {"fwvga", CAMCORDER_QUALITY_FWVGA},
+ {"wvga", CAMCORDER_QUALITY_WVGA},
+ {"vga", CAMCORDER_QUALITY_VGA},
+ {"wqvga", CAMCORDER_QUALITY_WQVGA},
{"timelapselow", CAMCORDER_QUALITY_TIME_LAPSE_LOW},
{"timelapsehigh", CAMCORDER_QUALITY_TIME_LAPSE_HIGH},
@@ -800,6 +808,10 @@ MediaProfiles::createDefaultCamcorderProfiles(MediaProfiles *profiles)
MediaProfiles::createDefaultAudioEncoders(MediaProfiles *profiles)
{
profiles->mAudioEncoders.add(createDefaultAmrNBEncoderCap());
+#ifdef QCOM_HARDWARE
+ profiles->mAudioEncoders.add(createDefaultAacEncoderCap());
+ profiles->mAudioEncoders.add(createDefaultLpcmEncoderCap());
+#endif
}
/*static*/ void
@@ -834,6 +846,22 @@ MediaProfiles::createDefaultAmrNBEncoderCap()
AUDIO_ENCODER_AMR_NB, 5525, 12200, 8000, 8000, 1, 1);
}
+#ifdef QCOM_HARDWARE
+// Default AAC encoder capability: 64-156 kbps, 8-48 kHz, mono or stereo.
+/*static*/ MediaProfiles::AudioEncoderCap*
+MediaProfiles::createDefaultAacEncoderCap()
+{
+    return new MediaProfiles::AudioEncoderCap(
+        AUDIO_ENCODER_AAC, 64000, 156000, 8000, 48000, 1, 2);
+}
+
+// Default LPCM capability: 768-4608 kbps, 48 kHz only, up to 6 channels
+// (5.1 capture).
+/*static*/ MediaProfiles::AudioEncoderCap*
+MediaProfiles::createDefaultLpcmEncoderCap()
+{
+    return new MediaProfiles::AudioEncoderCap(
+        AUDIO_ENCODER_LPCM, 768000, 4608000, 48000, 48000, 1, 6);
+}
+#endif
+
/*static*/ void
MediaProfiles::createDefaultImageEncodingQualityLevels(MediaProfiles *profiles)
{
diff --git a/media/libmedia/ToneGenerator.cpp b/media/libmedia/ToneGenerator.cpp
index 253602d..a5ce487 100644
--- a/media/libmedia/ToneGenerator.cpp
+++ b/media/libmedia/ToneGenerator.cpp
@@ -798,7 +798,8 @@ const unsigned char /*tone_type*/ ToneGenerator::sToneMappingTable[NUM_REGIONS-1
// none
//
////////////////////////////////////////////////////////////////////////////////
-ToneGenerator::ToneGenerator(audio_stream_type_t streamType, float volume, bool threadCanCallJava) {
+ToneGenerator::ToneGenerator(audio_stream_type_t streamType, float volume, bool threadCanCallJava)
+ : mpAudioTrack(NULL), mpToneDesc(NULL), mpNewToneDesc(NULL) {
ALOGV("ToneGenerator constructor: streamType=%d, volume=%f", streamType, volume);
@@ -811,9 +812,6 @@ ToneGenerator::ToneGenerator(audio_stream_type_t streamType, float volume, bool
mThreadCanCallJava = threadCanCallJava;
mStreamType = streamType;
mVolume = volume;
- mpAudioTrack = NULL;
- mpToneDesc = NULL;
- mpNewToneDesc = NULL;
// Generate tone by chunks of 20 ms to keep cadencing precision
mProcessSize = (mSamplingRate * 20) / 1000;
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
index 5b5ed71..a583d48 100644
--- a/media/libmediaplayerservice/Android.mk
+++ b/media/libmediaplayerservice/Android.mk
@@ -47,7 +47,12 @@ LOCAL_C_INCLUDES := \
$(TOP)/frameworks/av/media/libstagefright/rtsp \
$(TOP)/frameworks/av/media/libstagefright/wifi-display \
$(TOP)/frameworks/native/include/media/openmax \
- $(TOP)/external/tremolo/Tremolo \
+ $(TOP)/external/tremolo/Tremolo
+
+ifeq ($(BOARD_USES_QCOM_HARDWARE),true)
+LOCAL_C_INCLUDES += \
+ $(TOP)/hardware/qcom/media/mm-core/inc
+endif
LOCAL_MODULE:= libmediaplayerservice
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 9bedff1..414c262 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -1,5 +1,9 @@
/*
**
+** Copyright (c) 2012, The Linux Foundation. All rights reserved.
+** Not a Contribution, Apache license notifications and license are retained
+** for attribution purposes only.
+**
** Copyright 2008, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
@@ -74,6 +78,7 @@
#include "Crypto.h"
#include "HDCP.h"
#include "RemoteDisplay.h"
+#define DEFAULT_SAMPLE_RATE 44100
namespace {
using android::media::Metadata;
@@ -1363,6 +1368,22 @@ status_t MediaPlayerService::AudioOutput::getPosition(uint32_t *position) const
return mTrack->getPosition(position);
}
+#ifdef QCOM_HARDWARE
+ssize_t MediaPlayerService::AudioOutput::sampleRate() const
+{
+    if (mTrack == 0) return NO_INIT;
+    // NOTE(review): returns the hard-coded DEFAULT_SAMPLE_RATE (44100)
+    // rather than the rate the track was actually opened with
+    // (mSampleRateHz is set in open()). Confirm whether direct-output
+    // tracks are really always 44.1 kHz before relying on this.
+    return DEFAULT_SAMPLE_RATE;
+}
+
+// Fetches the rendered-audio timestamp from the underlying AudioTrack.
+status_t MediaPlayerService::AudioOutput::getTimeStamp(uint64_t *tstamp)
+{
+    if (tstamp == 0) return BAD_VALUE;
+    if (mTrack == 0) return NO_INIT;
+    // NOTE(review): the status returned by AudioTrack::getTimeStamp() is
+    // ignored and *tstamp is not pre-initialized here — verify the track
+    // always fills it on every path.
+    mTrack->getTimeStamp(tstamp);
+    return NO_ERROR;
+}
+#endif
+
status_t MediaPlayerService::AudioOutput::getFramesWritten(uint32_t *frameswritten) const
{
if (mTrack == 0) return NO_INIT;
@@ -1379,6 +1400,65 @@ status_t MediaPlayerService::AudioOutput::open(
mCallback = cb;
mCallbackCookie = cookie;
+#ifdef QCOM_HARDWARE
+ if (flags & AUDIO_OUTPUT_FLAG_LPA || flags & AUDIO_OUTPUT_FLAG_TUNNEL) {
+ ALOGV("AudioOutput open: with flags %x",flags);
+ channelMask = audio_channel_out_mask_from_count(channelCount);
+ if (0 == channelMask) {
+ ALOGE("open() error, can't derive mask for %d audio channels", channelCount);
+ return NO_INIT;
+ }
+ AudioTrack *audioTrack = NULL;
+ CallbackData *newcbd = NULL;
+ if (mCallback != NULL) {
+ newcbd = new CallbackData(this);
+ audioTrack = new AudioTrack(
+ mStreamType,
+ sampleRate,
+ format,
+ channelMask,
+ 0,
+ flags,
+ CallbackWrapper,
+ newcbd,
+ 0,
+ mSessionId);
+ if ((audioTrack == 0) || (audioTrack->initCheck() != NO_ERROR)) {
+ ALOGE("Unable to create audio track");
+ delete audioTrack;
+ delete newcbd;
+ return NO_INIT;
+ }
+ } else {
+ ALOGE("no callback supplied");
+ return NO_INIT;
+ }
+
+ if (mRecycledTrack) {
+ //usleep(500000);
+ // if we're not going to reuse the track, unblock and flush it
+ if (mCallbackData != NULL) {
+ mCallbackData->setOutput(NULL);
+ mCallbackData->endTrackSwitch();
+ }
+ mRecycledTrack->flush();
+ delete mRecycledTrack;
+ mRecycledTrack = NULL;
+ delete mCallbackData;
+ mCallbackData = NULL;
+ close();
+ }
+
+ ALOGV("setVolume");
+ mCallbackData = newcbd;
+ audioTrack->setVolume(mLeftVolume, mRightVolume);
+ mSampleRateHz = sampleRate;
+ mFlags = flags;
+ mTrack = audioTrack;
+ return NO_ERROR;
+ }
+#endif
+
// Check argument "bufferCount" against the mininum buffer count
if (bufferCount < mMinBufferCount) {
ALOGD("bufferCount (%d) is too small and increased to %d", bufferCount, mMinBufferCount);
@@ -1551,7 +1631,7 @@ void MediaPlayerService::AudioOutput::switchToNextOutput() {
ssize_t MediaPlayerService::AudioOutput::write(const void* buffer, size_t size)
{
- LOG_FATAL_IF(mCallback != NULL, "Don't call write if supplying a callback.");
+ //LOG_FATAL_IF(mCallback != NULL, "Don't call write if supplying a callback.");
//ALOGV("write(%p, %u)", buffer, size);
if (mTrack) {
@@ -1637,35 +1717,56 @@ status_t MediaPlayerService::AudioOutput::attachAuxEffect(int effectId)
void MediaPlayerService::AudioOutput::CallbackWrapper(
int event, void *cookie, void *info) {
//ALOGV("callbackwrapper");
- if (event != AudioTrack::EVENT_MORE_DATA) {
- return;
- }
-
- CallbackData *data = (CallbackData*)cookie;
- data->lock();
- AudioOutput *me = data->getOutput();
- AudioTrack::Buffer *buffer = (AudioTrack::Buffer *)info;
- if (me == NULL) {
- // no output set, likely because the track was scheduled to be reused
- // by another player, but the format turned out to be incompatible.
+#ifdef QCOM_HARDWARE
+ if (event == AudioTrack::EVENT_UNDERRUN) {
+ ALOGW("Event underrun");
+ CallbackData *data = (CallbackData*)cookie;
+ data->lock();
+ AudioOutput *me = data->getOutput();
+ if (me == NULL) {
+ // no output set, likely because the track was scheduled to be reused
+ // by another player, but the format turned out to be incompatible.
+ data->unlock();
+ return;
+ }
+ ALOGD("Callback!!!");
+ (*me->mCallback)(
+ me, NULL, (size_t)AudioTrack::EVENT_UNDERRUN, me->mCallbackCookie);
data->unlock();
- buffer->size = 0;
return;
}
+#endif
+ if (event == AudioTrack::EVENT_MORE_DATA) {
+ CallbackData *data = (CallbackData*)cookie;
+ data->lock();
+ AudioOutput *me = data->getOutput();
+ AudioTrack::Buffer *buffer = (AudioTrack::Buffer *)info;
+ if (me == NULL) {
+ // no output set, likely because the track was scheduled to be reused
+ // by another player, but the format turned out to be incompatible.
+ data->unlock();
+ buffer->size = 0;
+ return;
+ }
+
+ size_t actualSize = (*me->mCallback)(
+ me, buffer->raw, buffer->size, me->mCallbackCookie);
- size_t actualSize = (*me->mCallback)(
- me, buffer->raw, buffer->size, me->mCallbackCookie);
+ if (actualSize == 0 && buffer->size > 0 && me->mNextOutput == NULL) {
+ // We've reached EOS but the audio track is not stopped yet,
+ // keep playing silence.
- if (actualSize == 0 && buffer->size > 0 && me->mNextOutput == NULL) {
- // We've reached EOS but the audio track is not stopped yet,
- // keep playing silence.
+ memset(buffer->raw, 0, buffer->size);
+ actualSize = buffer->size;
+ }
- memset(buffer->raw, 0, buffer->size);
- actualSize = buffer->size;
+ buffer->size = actualSize;
+ data->unlock();
}
- buffer->size = actualSize;
- data->unlock();
+ return;
+
+
}
int MediaPlayerService::AudioOutput::getSessionId() const
@@ -1700,6 +1801,13 @@ status_t MediaPlayerService::AudioCache::getPosition(uint32_t *position) const
return NO_ERROR;
}
+#ifdef QCOM_HARDWARE
+// Sample rate of the cached audio buffer, captured when the cache was
+// filled.
+ssize_t MediaPlayerService::AudioCache::sampleRate() const
+{
+    return mSampleRate;
+}
+#endif
+
status_t MediaPlayerService::AudioCache::getFramesWritten(uint32_t *written) const
{
if (written == 0) return BAD_VALUE;
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index fd648df..54df9d2 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -114,6 +114,10 @@ class MediaPlayerService : public BnMediaPlayerService
void setNextOutput(const sp<AudioOutput>& nextOutput);
void switchToNextOutput();
virtual bool needsTrailingPadding() { return mNextOutput == NULL; }
+#ifdef QCOM_HARDWARE
+ virtual ssize_t sampleRate() const;
+ virtual status_t getTimeStamp(uint64_t *tstamp);
+#endif
private:
static void setMinBufferCount();
@@ -205,8 +209,10 @@ class MediaPlayerService : public BnMediaPlayerService
virtual void close() {}
void setAudioStreamType(audio_stream_type_t streamType) {}
void setVolume(float left, float right) {}
- virtual status_t setPlaybackRatePermille(int32_t ratePermille) { return INVALID_OPERATION; }
+#ifndef QCOM_HARDWARE
uint32_t sampleRate() const { return mSampleRate; }
+#endif
+ virtual status_t setPlaybackRatePermille(int32_t ratePermille) { return INVALID_OPERATION; }
audio_format_t format() const { return mFormat; }
size_t size() const { return mSize; }
status_t wait();
@@ -216,6 +222,9 @@ class MediaPlayerService : public BnMediaPlayerService
static void notify(void* cookie, int msg,
int ext1, int ext2, const Parcel *obj);
virtual status_t dump(int fd, const Vector<String16>& args) const;
+#ifdef QCOM_HARDWARE
+ virtual ssize_t sampleRate() const;
+#endif
private:
AudioCache();
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 57b0ec2..335dd43 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2009 The Android Open Source Project
+ * Copyright (c) 2010 - 2012, The Linux Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -29,6 +30,10 @@
#include <media/stagefright/AudioSource.h>
#include <media/stagefright/AMRWriter.h>
#include <media/stagefright/AACWriter.h>
+#ifdef QCOM_HARDWARE
+#include <media/stagefright/ExtendedWriter.h>
+#include <media/stagefright/WAVEWriter.h>
+#endif
#include <media/stagefright/CameraSource.h>
#include <media/stagefright/CameraSourceTimeLapse.h>
#include <media/stagefright/MPEG2TSWriter.h>
@@ -49,6 +54,9 @@
#include <unistd.h>
#include <system/audio.h>
+#ifdef QCOM_HARDWARE
+#include <QCMediaDefs.h>
+#endif
#include "ARTPWriter.h"
@@ -159,6 +167,26 @@ status_t StagefrightRecorder::setAudioEncoder(audio_encoder ae) {
mAudioEncoder = ae;
}
+#ifdef QCOM_HARDWARE
+ // Use default values if appropriate setparam's weren't called.
+ if(mAudioEncoder == AUDIO_ENCODER_AAC) {
+ mSampleRate = mSampleRate ? mSampleRate : 48000;
+ mAudioChannels = mAudioChannels ? mAudioChannels : 2;
+ mAudioBitRate = mAudioBitRate ? mAudioBitRate : 156000;
+ } else if(mAudioEncoder == AUDIO_ENCODER_LPCM) {
+ mSampleRate = mSampleRate ? mSampleRate : 48000;
+ mAudioChannels = mAudioChannels ? mAudioChannels : 2;
+ mAudioBitRate = mAudioBitRate ? mAudioBitRate : 4608000;
+ } else if(mAudioEncoder == AUDIO_ENCODER_AMR_WB) {
+ mSampleRate = 16000;
+ mAudioChannels = 1;
+ mAudioBitRate = 23850;
+ } else {
+ mSampleRate = mSampleRate ? mSampleRate : 8000;
+ mAudioChannels = mAudioChannels ? mAudioChannels : 1;
+ mAudioBitRate = mAudioBitRate ? mAudioBitRate : 12200;
+ }
+#endif
return OK;
}
@@ -327,7 +355,7 @@ status_t StagefrightRecorder::setParamAudioSamplingRate(int32_t sampleRate) {
status_t StagefrightRecorder::setParamAudioNumberOfChannels(int32_t channels) {
ALOGV("setParamAudioNumberOfChannels: %d", channels);
- if (channels <= 0 || channels >= 3) {
+ if (channels != 1 && channels != 2 && channels != 6) {
ALOGE("Invalid number of audio channels: %d", channels);
return BAD_VALUE;
}
@@ -768,7 +796,15 @@ status_t StagefrightRecorder::start() {
case OUTPUT_FORMAT_MPEG2TS:
status = startMPEG2TSRecording();
break;
+#ifdef QCOM_HARDWARE
+ case OUTPUT_FORMAT_QCP:
+ status = startExtendedRecording( );
+ break;
+ case OUTPUT_FORMAT_WAVE:
+ status = startWAVERecording( );
+ break;
+#endif
default:
ALOGE("Unsupported output file format: %d", mOutputFormat);
status = UNKNOWN_ERROR;
@@ -809,6 +845,11 @@ sp<MediaSource> StagefrightRecorder::createAudioSource() {
sp<MetaData> encMeta = new MetaData;
const char *mime;
switch (mAudioEncoder) {
+#ifdef QCOM_HARDWARE
+ case AUDIO_ENCODER_LPCM:
+ mime = MEDIA_MIMETYPE_AUDIO_RAW;
+ break;
+#endif
case AUDIO_ENCODER_AMR_NB:
case AUDIO_ENCODER_DEFAULT:
mime = MEDIA_MIMETYPE_AUDIO_AMR_NB;
@@ -828,6 +869,14 @@ sp<MediaSource> StagefrightRecorder::createAudioSource() {
mime = MEDIA_MIMETYPE_AUDIO_AAC;
encMeta->setInt32(kKeyAACProfile, OMX_AUDIO_AACObjectELD);
break;
+#ifdef QCOM_HARDWARE
+ case AUDIO_ENCODER_EVRC:
+ mime = MEDIA_MIMETYPE_AUDIO_EVRC;
+ break;
+ case AUDIO_ENCODER_QCELP:
+ mime = MEDIA_MIMETYPE_AUDIO_QCELP;
+ break;
+#endif
default:
ALOGE("Unknown audio encoder: %d", mAudioEncoder);
@@ -852,6 +901,17 @@ sp<MediaSource> StagefrightRecorder::createAudioSource() {
sp<MediaSource> audioEncoder =
OMXCodec::Create(client.interface(), encMeta,
true /* createEncoder */, audioSource);
+#ifdef QCOM_HARDWARE
+ // If encoder could not be created (as in LPCM), then
+ // use the AudioSource directly as the MediaSource.
+    if (audioEncoder == NULL && mAudioEncoder == AUDIO_ENCODER_LPCM) {
+ ALOGD("No encoder is needed, use the AudioSource directly as the MediaSource for LPCM format");
+ audioEncoder = audioSource;
+ }
+ if (mAudioSourceNode != NULL) {
+ mAudioSourceNode.clear();
+ }
+#endif
mAudioSourceNode = audioSource;
return audioEncoder;
@@ -888,13 +948,35 @@ status_t StagefrightRecorder::startAMRRecording() {
mAudioEncoder);
return BAD_VALUE;
}
+#ifdef QCOM_HARDWARE
+ if (mSampleRate != 8000) {
+ ALOGE("Invalid sampling rate %d used for AMRNB recording",
+ mSampleRate);
+ return BAD_VALUE;
+ }
+#endif
} else { // mOutputFormat must be OUTPUT_FORMAT_AMR_WB
if (mAudioEncoder != AUDIO_ENCODER_AMR_WB) {
ALOGE("Invlaid encoder %d used for AMRWB recording",
mAudioEncoder);
return BAD_VALUE;
}
+#ifdef QCOM_HARDWARE
+ if (mSampleRate != 16000) {
+ ALOGE("Invalid sample rate %d used for AMRWB recording",
+ mSampleRate);
+ return BAD_VALUE;
+ }
+#endif
+ }
+
+#ifdef QCOM_HARDWARE
+ if (mAudioChannels != 1) {
+ ALOGE("Invalid number of audio channels %d used for amr recording",
+ mAudioChannels);
+ return BAD_VALUE;
}
+#endif
mWriter = new AMRWriter(mOutputFd);
status_t status = startRawAudioRecording();
@@ -905,6 +987,24 @@ status_t StagefrightRecorder::startAMRRecording() {
return status;
}
+#ifdef QCOM_HARDWARE
+status_t StagefrightRecorder::startWAVERecording() {
+ CHECK(mOutputFormat == OUTPUT_FORMAT_WAVE);
+
+ CHECK(mAudioEncoder == AUDIO_ENCODER_LPCM);
+ CHECK(mAudioSource != AUDIO_SOURCE_CNT);
+
+ mWriter = new WAVEWriter(mOutputFd);
+ status_t status = startRawAudioRecording();
+ if (status != OK) {
+ mWriter.clear();
+ mWriter = NULL;
+ }
+
+ return status;
+}
+#endif
+
status_t StagefrightRecorder::startRawAudioRecording() {
if (mAudioSource >= AUDIO_SOURCE_CNT) {
ALOGE("Invalid audio source: %d", mAudioSource);
@@ -1450,6 +1550,9 @@ status_t StagefrightRecorder::setupAudioEncoder(const sp<MediaWriter>& writer) {
case AUDIO_ENCODER_AAC:
case AUDIO_ENCODER_HE_AAC:
case AUDIO_ENCODER_AAC_ELD:
+#ifdef QCOM_HARDWARE
+ case AUDIO_ENCODER_LPCM:
+#endif
break;
default:
@@ -1611,7 +1714,12 @@ status_t StagefrightRecorder::stop() {
::close(mOutputFd);
mOutputFd = -1;
}
-
+#ifdef QCOM_HARDWARE
+ if (mAudioSourceNode != NULL) {
+ mAudioSourceNode.clear();
+ mAudioSourceNode = NULL;
+ }
+#endif
if (mStarted) {
mStarted = false;
@@ -1653,9 +1761,15 @@ status_t StagefrightRecorder::reset() {
mVideoHeight = 144;
mFrameRate = -1;
mVideoBitRate = 192000;
+#ifdef QCOM_HARDWARE
+ mSampleRate = 0;
+ mAudioChannels = 0;
+ mAudioBitRate = 0;
+#else
mSampleRate = 8000;
mAudioChannels = 1;
mAudioBitRate = 12200;
+#endif
mInterleaveDurationUs = 0;
mIFramesIntervalSec = 1;
mAudioSourceNode = 0;
@@ -1767,4 +1881,48 @@ status_t StagefrightRecorder::dump(
::write(fd, result.string(), result.size());
return OK;
}
+
+#ifdef QCOM_HARDWARE
+status_t StagefrightRecorder::startExtendedRecording() {
+ CHECK(mOutputFormat == OUTPUT_FORMAT_QCP);
+
+ if (mSampleRate != 8000) {
+ ALOGE("Invalid sampling rate %d used for recording",
+ mSampleRate);
+ return BAD_VALUE;
+ }
+ if (mAudioChannels != 1) {
+ ALOGE("Invalid number of audio channels %d used for recording",
+ mAudioChannels);
+ return BAD_VALUE;
+ }
+
+ if (mAudioSource >= AUDIO_SOURCE_CNT) {
+ ALOGE("Invalid audio source: %d", mAudioSource);
+ return BAD_VALUE;
+ }
+
+ sp<MediaSource> audioEncoder = createAudioSource();
+
+ if (audioEncoder == NULL) {
+ ALOGE("AudioEncoder NULL");
+ return UNKNOWN_ERROR;
+ }
+
+ mWriter = new ExtendedWriter(dup(mOutputFd));
+ mWriter->addSource(audioEncoder);
+
+ if (mMaxFileDurationUs != 0) {
+ mWriter->setMaxFileDuration(mMaxFileDurationUs);
+ }
+ if (mMaxFileSizeBytes != 0) {
+ mWriter->setMaxFileSize(mMaxFileSizeBytes);
+ }
+ mWriter->setListener(mListener);
+ mWriter->start();
+
+ return OK;
+}
+#endif
+
} // namespace android
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index ec5ce7e..3f0b821 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2009 The Android Open Source Project
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -132,6 +133,9 @@ private:
status_t startMPEG4Recording();
status_t startAMRRecording();
status_t startAACRecording();
+#ifdef QCOM_HARDWARE
+ status_t startWAVERecording();
+#endif
status_t startRawAudioRecording();
status_t startRTPRecording();
status_t startMPEG2TSRecording();
@@ -187,6 +191,11 @@ private:
StagefrightRecorder(const StagefrightRecorder &);
StagefrightRecorder &operator=(const StagefrightRecorder &);
+
+#ifdef QCOM_HARDWARE
+ /* extension */
+ status_t startExtendedRecording();
+#endif
};
} // namespace android
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index a01d03f..19bf28a 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -35,6 +35,13 @@
#include <media/hardware/HardwareAPI.h>
#include <OMX_Component.h>
+#ifdef USE_TI_CUSTOM_DOMX
+#include <OMX_TI_IVCommon.h>
+#endif
+
+#ifdef USE_SAMSUNG_COLORFORMAT
+#include <sec_format.h>
+#endif
#include "include/avc_utils.h"
@@ -507,11 +514,22 @@ status_t ACodec::allocateOutputBuffersFromNativeWindow() {
return err;
}
+#ifdef USE_SAMSUNG_COLORFORMAT
+ OMX_COLOR_FORMATTYPE eNativeColorFormat = def.format.video.eColorFormat;
+ setNativeWindowColorFormat(eNativeColorFormat);
+
+ err = native_window_set_buffers_geometry(
+ mNativeWindow.get(),
+ def.format.video.nFrameWidth,
+ def.format.video.nFrameHeight,
+ eNativeColorFormat);
+#else
err = native_window_set_buffers_geometry(
mNativeWindow.get(),
def.format.video.nFrameWidth,
def.format.video.nFrameHeight,
def.format.video.eColorFormat);
+#endif
if (err != 0) {
ALOGE("native_window_set_buffers_geometry failed: %s (%d)",
@@ -654,6 +672,25 @@ status_t ACodec::allocateOutputBuffersFromNativeWindow() {
return err;
}
+#ifdef USE_SAMSUNG_COLORFORMAT
+void ACodec::setNativeWindowColorFormat(OMX_COLOR_FORMATTYPE &eNativeColorFormat)
+{
+ // In case of Samsung decoders, we set proper native color format for the Native Window
+ if (!strcasecmp(mComponentName.c_str(), "OMX.SEC.AVC.Decoder")
+ || !strcasecmp(mComponentName.c_str(), "OMX.SEC.FP.AVC.Decoder")) {
+ switch (eNativeColorFormat) {
+ case OMX_COLOR_FormatYUV420SemiPlanar:
+ eNativeColorFormat = (OMX_COLOR_FORMATTYPE)HAL_PIXEL_FORMAT_YCbCr_420_SP;
+ break;
+ case OMX_COLOR_FormatYUV420Planar:
+ default:
+ eNativeColorFormat = (OMX_COLOR_FORMATTYPE)HAL_PIXEL_FORMAT_YCbCr_420_P;
+ break;
+ }
+ }
+}
+#endif
+
status_t ACodec::cancelBufferToNativeWindow(BufferInfo *info) {
CHECK_EQ((int)info->mStatus, (int)BufferInfo::OWNED_BY_US);
@@ -783,6 +820,10 @@ status_t ACodec::setComponentRole(
"audio_decoder.amrnb", "audio_encoder.amrnb" },
{ MEDIA_MIMETYPE_AUDIO_AMR_WB,
"audio_decoder.amrwb", "audio_encoder.amrwb" },
+#ifdef QCOM_ENHANCED_AUDIO
+ { MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS,
+ "audio_decoder.amrwbplus", "audio_encoder.amrwbplus" },
+#endif
{ MEDIA_MIMETYPE_AUDIO_AAC,
"audio_decoder.aac", "audio_encoder.aac" },
{ MEDIA_MIMETYPE_AUDIO_VORBIS,
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index faa0f31..e2209ff4 100644..100755
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -60,10 +60,51 @@ LOCAL_SRC_FILES:= \
LOCAL_C_INCLUDES:= \
$(TOP)/frameworks/av/include/media/stagefright/timedtext \
$(TOP)/frameworks/native/include/media/hardware \
- $(TOP)/frameworks/native/include/media/openmax \
$(TOP)/external/flac/include \
$(TOP)/external/tremolo \
- $(TOP)/external/openssl/include \
+ $(TOP)/external/openssl/include
+
+ifneq ($(TI_CUSTOM_DOMX_PATH),)
+LOCAL_C_INCLUDES += $(TI_CUSTOM_DOMX_PATH)/omx_core/inc
+LOCAL_CPPFLAGS += -DUSE_TI_CUSTOM_DOMX
+else
+LOCAL_C_INCLUDES += $(TOP)/frameworks/native/include/media/openmax
+endif
+
+ifeq ($(BOARD_USES_STE_FMRADIO),true)
+LOCAL_SRC_FILES += \
+ FMRadioSource.cpp \
+ PCMExtractor.cpp
+endif
+
+ifeq ($(BOARD_USES_QCOM_HARDWARE),true)
+LOCAL_SRC_FILES += \
+ ExtendedWriter.cpp \
+ QCMediaDefs.cpp \
+ QCOMXCodec.cpp \
+ WAVEWriter.cpp \
+ ExtendedExtractor.cpp
+
+LOCAL_C_INCLUDES += \
+ $(TOP)/hardware/qcom/media/mm-core/inc
+
+ifeq ($(TARGET_QCOM_AUDIO_VARIANT),caf)
+ ifeq ($(call is-board-platform-in-list,msm8660 msm7x27a msm7x30),true)
+ LOCAL_SRC_FILES += LPAPlayer.cpp
+ else
+ LOCAL_SRC_FILES += LPAPlayerALSA.cpp
+ endif
+ ifeq ($(BOARD_USES_ALSA_AUDIO),true)
+ ifeq ($(call is-chipset-in-board-platform,msm8960),true)
+ LOCAL_CFLAGS += -DUSE_TUNNEL_MODE
+ LOCAL_CFLAGS += -DTUNNEL_MODE_SUPPORTS_AMRWB
+ endif
+ endif
+LOCAL_CFLAGS += -DQCOM_ENHANCED_AUDIO
+LOCAL_SRC_FILES += TunnelPlayer.cpp
+endif
+endif
+
LOCAL_SHARED_LIBRARIES := \
libbinder \
@@ -91,6 +132,7 @@ LOCAL_SHARED_LIBRARIES := \
LOCAL_STATIC_LIBRARIES := \
libstagefright_color_conversion \
+ libstagefright_mp3dec \
libstagefright_aacenc \
libstagefright_matroska \
libstagefright_timedtext \
@@ -115,6 +157,20 @@ LOCAL_SHARED_LIBRARIES += \
LOCAL_CFLAGS += -Wno-multichar
+ifeq ($(BOARD_USE_SAMSUNG_COLORFORMAT), true)
+LOCAL_CFLAGS += -DUSE_SAMSUNG_COLORFORMAT
+
+# Include native color format header path
+LOCAL_C_INCLUDES += \
+ $(TOP)/hardware/samsung/exynos4/hal/include \
+ $(TOP)/hardware/samsung/exynos4/include
+
+endif
+
+ifeq ($(BOARD_USE_TI_DUCATI_H264_PROFILE), true)
+LOCAL_CFLAGS += -DUSE_TI_DUCATI_H264_PROFILE
+endif
+
LOCAL_MODULE:= libstagefright
LOCAL_MODULE_TAGS := optional
diff --git a/media/libstagefright/AudioPlayer.cpp b/media/libstagefright/AudioPlayer.cpp
index 4208019..deb6b70 100644
--- a/media/libstagefright/AudioPlayer.cpp
+++ b/media/libstagefright/AudioPlayer.cpp
@@ -310,6 +310,13 @@ size_t AudioPlayer::AudioSinkCallback(
void *buffer, size_t size, void *cookie) {
AudioPlayer *me = (AudioPlayer *)cookie;
+#ifdef QCOM_ENHANCED_AUDIO
+ if (buffer == NULL) {
+ //Not applicable for AudioPlayer
+ ALOGE("This indicates the event underrun case for LPA/Tunnel");
+ return 0;
+ }
+#endif
return me->fillBuffer(buffer, size);
}
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index 861aebe..bb2d415 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2010 The Android Open Source Project
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -56,7 +57,7 @@ AudioSource::AudioSource(
mNumFramesReceived(0),
mNumClientOwnedBuffers(0) {
ALOGV("sampleRate: %d, channelCount: %d", sampleRate, channelCount);
- CHECK(channelCount == 1 || channelCount == 2);
+ CHECK(channelCount == 1 || channelCount == 2 || channelCount == 6);
int minFrameCount;
status_t status = AudioRecord::getMinFrameCount(&minFrameCount,
@@ -82,6 +83,17 @@ AudioSource::AudioSource(
this,
frameCount);
mInitCheck = mRecord->initCheck();
+
+ //configure the auto ramp start duration
+ mAutoRampStartUs = kAutoRampStartUs;
+ uint32_t playbackLatencyMs = 0;
+ if (AudioSystem::getOutputLatency(&playbackLatencyMs,
+ AUDIO_STREAM_DEFAULT) == OK) {
+ if (2*playbackLatencyMs*1000LL > kAutoRampStartUs) {
+ mAutoRampStartUs = 2*playbackLatencyMs*1000LL;
+ }
+ }
+ ALOGD("Start autoramp from %lld", mAutoRampStartUs);
} else {
mInitCheck = status;
}
@@ -237,14 +249,14 @@ status_t AudioSource::read(
int64_t timeUs;
CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs));
int64_t elapsedTimeUs = timeUs - mStartTimeUs;
- if (elapsedTimeUs < kAutoRampStartUs) {
+ if (elapsedTimeUs < mAutoRampStartUs) {
memset((uint8_t *) buffer->data(), 0, buffer->range_length());
- } else if (elapsedTimeUs < kAutoRampStartUs + kAutoRampDurationUs) {
+ } else if (elapsedTimeUs < mAutoRampStartUs + kAutoRampDurationUs) {
int32_t autoRampDurationFrames =
(kAutoRampDurationUs * mSampleRate + 500000LL) / 1000000LL;
int32_t autoRampStartFrames =
- (kAutoRampStartUs * mSampleRate + 500000LL) / 1000000LL;
+ (mAutoRampStartUs * mSampleRate + 500000LL) / 1000000LL;
int32_t nFrames = mNumFramesReceived - autoRampStartFrames;
rampVolume(nFrames, autoRampDurationFrames,
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
index 1e2625a..7d077f5 100644
--- a/media/libstagefright/AwesomePlayer.cpp
+++ b/media/libstagefright/AwesomePlayer.cpp
@@ -1,6 +1,9 @@
/*
* Copyright (C) 2009 The Android Open Source Project
- *
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ * Not a Contribution, Apache license notifications and license are retained
+ * for attribution purposes only.
+
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
@@ -39,8 +42,15 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/timedtext/TimedTextDriver.h>
#include <media/stagefright/AudioPlayer.h>
+#ifdef QCOM_ENHANCED_AUDIO
+#include <media/stagefright/LPAPlayer.h>
+#ifdef USE_TUNNEL_MODE
+#include <media/stagefright/TunnelPlayer.h>
+#endif
+#endif
#include <media/stagefright/DataSource.h>
#include <media/stagefright/FileSource.h>
+#include <media/stagefright/FMRadioSource.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaExtractor.h>
@@ -57,6 +67,8 @@
#define USE_SURFACE_ALLOC 1
#define FRAME_DROP_FREQ 0
+#define LPA_MIN_DURATION_USEC_ALLOWED 30000000
+#define LPA_MIN_DURATION_USEC_DEFAULT 60000000
namespace android {
@@ -64,6 +76,9 @@ static int64_t kLowWaterMarkUs = 2000000ll; // 2secs
static int64_t kHighWaterMarkUs = 5000000ll; // 5secs
static const size_t kLowWaterMarkBytes = 40000;
static const size_t kHighWaterMarkBytes = 200000;
+#ifdef QCOM_ENHANCED_AUDIO
+int AwesomePlayer::mTunnelAliveAP = 0;
+#endif
struct AwesomeEvent : public TimedEventQueue::Event {
AwesomeEvent(
@@ -214,6 +229,9 @@ AwesomePlayer::AwesomePlayer()
mAudioStatusEventPending = false;
reset();
+#ifdef USE_TUNNEL_MODE
+ mIsTunnelAudio = false;
+#endif
}
AwesomePlayer::~AwesomePlayer() {
@@ -223,6 +241,17 @@ AwesomePlayer::~AwesomePlayer() {
reset();
+#ifdef USE_TUNNEL_MODE
+ // Disable Tunnel Mode Audio
+ if (mIsTunnelAudio) {
+ if(mTunnelAliveAP > 0) {
+ mTunnelAliveAP--;
+ ALOGV("mTunnelAliveAP = %d", mTunnelAliveAP);
+ }
+ }
+ mIsTunnelAudio = false;
+#endif
+
mClient.disconnect();
}
@@ -579,6 +608,18 @@ void AwesomePlayer::reset_l() {
mStats.mVideoHeight = -1;
mStats.mFlags = 0;
mStats.mTracks.clear();
+ mStats.mConsecutiveFramesDropped = 0;
+ mStats.mCatchupTimeStart = 0;
+ mStats.mNumTimesSyncLoss = 0;
+ mStats.mMaxEarlyDelta = 0;
+ mStats.mMaxLateDelta = 0;
+ mStats.mMaxTimeSyncLoss = 0;
+ mStats.mTotalFrames = 0;
+ mStats.mLastFrameUs = 0;
+ mStats.mTotalTimeUs = 0;
+ mStats.mLastPausedTimeMs = 0;
+ mStats.mLastSeekToTimeMs = 0;
+ mStats.mFirstFrameLatencyUs = 0;
}
mWatchForAudioSeekComplete = false;
@@ -857,6 +898,9 @@ status_t AwesomePlayer::play() {
}
status_t AwesomePlayer::play_l() {
+#ifdef QCOM_ENHANCED_AUDIO
+ int tunnelObjectsAlive = 0;
+#endif
modifyFlags(SEEK_PREVIEW, CLEAR);
if (mFlags & PLAYING) {
@@ -884,6 +928,13 @@ status_t AwesomePlayer::play_l() {
if (mAudioSource != NULL) {
if (mAudioPlayer == NULL) {
if (mAudioSink != NULL) {
+#ifdef QCOM_ENHANCED_AUDIO
+ sp<MetaData> format = mAudioTrack->getFormat();
+ const char *mime;
+ bool success = format->findCString(kKeyMIMEType, &mime);
+ CHECK(success);
+#endif
+
bool allowDeepBuffering;
int64_t cachedDurationUs;
bool eos;
@@ -895,8 +946,81 @@ status_t AwesomePlayer::play_l() {
} else {
allowDeepBuffering = false;
}
-
- mAudioPlayer = new AudioPlayer(mAudioSink, allowDeepBuffering, this);
+#ifdef QCOM_ENHANCED_AUDIO
+#ifdef USE_TUNNEL_MODE
+ // Create tunnel player if tunnel mode is enabled
+ ALOGW("Trying to create tunnel player mIsTunnelAudio %d, \
+                TunnelPlayer::mTunnelObjectsAlive %d, \
+                LPAPlayer::objectsAlive = %d,\
+ (mAudioPlayer == NULL) %d",
+ mIsTunnelAudio, TunnelPlayer::mTunnelObjectsAlive,
+ LPAPlayer::objectsAlive,(mAudioPlayer == NULL));
+
+ if(mIsTunnelAudio && (mAudioPlayer == NULL) &&
+ (LPAPlayer::objectsAlive == 0) &&
+ (TunnelPlayer::mTunnelObjectsAlive == 0)) {
+ ALOGD("Tunnel player created for mime %s duration %lld\n",\
+ mime, mDurationUs);
+ bool initCheck = false;
+ if(mVideoSource != NULL) {
+ // The parameter true is to inform tunnel player that
+ // clip is audio video
+ mAudioPlayer = new TunnelPlayer(mAudioSink, initCheck,
+ this, true);
+ }
+ else {
+ mAudioPlayer = new TunnelPlayer(mAudioSink, initCheck,
+ this);
+ }
+ if(!initCheck) {
+ ALOGE("deleting Tunnel Player - initCheck failed");
+ delete mAudioPlayer;
+ mAudioPlayer = NULL;
+ }
+ }
+ tunnelObjectsAlive = (TunnelPlayer::mTunnelObjectsAlive);
+#endif
+ int32_t nchannels = 0;
+ if(mAudioTrack != NULL) {
+ sp<MetaData> format = mAudioTrack->getFormat();
+ if(format != NULL) {
+ format->findInt32( kKeyChannelCount, &nchannels );
+ ALOGV("nchannels %d;LPA will be skipped if nchannels is > 2 or nchannels == 0",nchannels);
+ }
+ }
+ char lpaDecode[PROPERTY_VALUE_MAX];
+ uint32_t minDurationForLPA = LPA_MIN_DURATION_USEC_DEFAULT;
+ char minUserDefDuration[PROPERTY_VALUE_MAX];
+ property_get("lpa.decode",lpaDecode,"0");
+            property_get("lpa.min_duration",minUserDefDuration,"60000000");
+ minDurationForLPA = atoi(minUserDefDuration);
+ if(minDurationForLPA < LPA_MIN_DURATION_USEC_ALLOWED) {
+ ALOGE("LPAPlayer::Clip duration setting of less than 30sec not supported, defaulting to 60sec");
+ minDurationForLPA = LPA_MIN_DURATION_USEC_DEFAULT;
+ }
+ if((strcmp("true",lpaDecode) == 0) && (mAudioPlayer == NULL) &&
+ (tunnelObjectsAlive==0) && (nchannels && (nchannels <= 2)))
+ {
+ ALOGV("LPAPlayer::getObjectsAlive() %d",LPAPlayer::objectsAlive);
+ if ( mDurationUs > minDurationForLPA
+ && (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG) || !strcasecmp(mime,MEDIA_MIMETYPE_AUDIO_AAC))
+ && LPAPlayer::objectsAlive == 0 && mVideoSource == NULL) {
+ ALOGD("LPAPlayer created, LPA MODE detected mime %s duration %lld", mime, mDurationUs);
+ bool initCheck = false;
+ mAudioPlayer = new LPAPlayer(mAudioSink, initCheck, this);
+ if(!initCheck) {
+ delete mAudioPlayer;
+ mAudioPlayer = NULL;
+ }
+ }
+ }
+ if(mAudioPlayer == NULL) {
+ ALOGV("AudioPlayer created, Non-LPA mode mime %s duration %lld\n", mime, mDurationUs);
+#endif
+ mAudioPlayer = new AudioPlayer(mAudioSink, allowDeepBuffering, this);
+#ifdef QCOM_ENHANCED_AUDIO
+ }
+#endif
mAudioPlayer->setSource(mAudioSource);
mTimeSource = mAudioPlayer;
@@ -914,9 +1038,14 @@ status_t AwesomePlayer::play_l() {
if (mVideoSource == NULL) {
// We don't want to post an error notification at this point,
// the error returned from MediaPlayer::start() will suffice.
-
- status_t err = startAudioPlayer_l(
- false /* sendErrorNotification */);
+ bool sendErrorNotification = false;
+#ifdef USE_TUNNEL_MODE
+ if(mIsTunnelAudio) {
+ // For tunnel Audio error has to be posted to the client
+ sendErrorNotification = true;
+ }
+#endif
+ status_t err = startAudioPlayer_l(sendErrorNotification);
if (err != OK) {
delete mAudioPlayer;
@@ -938,6 +1067,12 @@ status_t AwesomePlayer::play_l() {
mTimeSource = &mSystemTimeSource;
}
+ {
+ Mutex::Autolock autoLock(mStatsLock);
+ mStats.mFirstFrameLatencyStartUs = getTimeOfDayUs();
+ mStats.mVeryFirstFrame = true;
+ }
+
if (mVideoSource != NULL) {
// Kick off video playback
postVideoEvent_l();
@@ -1161,6 +1296,11 @@ status_t AwesomePlayer::pause_l(bool at_eos) {
Playback::PAUSE, 0);
}
+ if(!(mFlags & AT_EOS)){
+ Mutex::Autolock autoLock(mStatsLock);
+ mStats.mLastPausedTimeMs = mVideoTimeUs/1000;
+ }
+
uint32_t params = IMediaPlayerService::kBatteryDataTrackDecoder;
if ((mAudioSource != NULL) && (mAudioSource != mAudioTrack)) {
params |= IMediaPlayerService::kBatteryDataTrackAudio;
@@ -1297,7 +1437,8 @@ status_t AwesomePlayer::getPosition(int64_t *positionUs) {
status_t AwesomePlayer::seekTo(int64_t timeUs) {
ATRACE_CALL();
- if (mExtractorFlags & MediaExtractor::CAN_SEEK) {
+ if (((timeUs == 0) && (mExtractorFlags & MediaExtractor::CAN_SEEK_TO_ZERO)) ||
+ (mExtractorFlags & MediaExtractor::CAN_SEEK)) {
Mutex::Autolock autoLock(mLock);
return seekTo_l(timeUs);
}
@@ -1320,6 +1461,12 @@ status_t AwesomePlayer::seekTo_l(int64_t timeUs) {
}
mSeeking = SEEK;
+
+ {
+ Mutex::Autolock autoLock(mStatsLock);
+ mStats.mFirstFrameLatencyStartUs = getTimeOfDayUs();
+ mStats.mVeryFirstFrame = true;
+ }
mSeekNotificationSent = false;
mSeekTimeUs = timeUs;
modifyFlags((AT_EOS | AUDIO_AT_EOS | VIDEO_AT_EOS), CLEAR);
@@ -1385,14 +1532,119 @@ status_t AwesomePlayer::initAudioDecoder() {
const char *mime;
CHECK(meta->findCString(kKeyMIMEType, &mime));
+#ifdef QCOM_ENHANCED_AUDIO
+ int32_t nchannels = 0;
+ int32_t isADTS = 0;
+ meta->findInt32( kKeyChannelCount, &nchannels );
+ meta->findInt32(kKeyIsADTS, &isADTS);
+ if(isADTS == 1){
+ ALOGV("Widevine content\n");
+ }
+ ALOGV("nchannels %d;LPA will be skipped if nchannels is > 2 or nchannels == 0",
+ nchannels);
+#endif
+#ifdef USE_TUNNEL_MODE
+ char tunnelDecode[PROPERTY_VALUE_MAX];
+ property_get("tunnel.decode",tunnelDecode,"0");
+ // Enable tunnel mode for mp3 and aac and if the clip is not aac adif
+    // and if no other tunnel mode instances are running.
+ ALOGD("Tunnel Mime Type: %s, object alive = %d, mTunnelAliveAP = %d",\
+ mime, (TunnelPlayer::mTunnelObjectsAlive), mTunnelAliveAP);
+
+ bool sys_prop_enabled = !strcmp("true",tunnelDecode) || atoi(tunnelDecode);
+
+ //widevine will fallback to software decoder
+ if (sys_prop_enabled && (TunnelPlayer::mTunnelObjectsAlive == 0) &&
+ mTunnelAliveAP == 0 && (isADTS == 0) &&
+ mAudioSink->realtime() &&
+ inSupportedTunnelFormats(mime)) {
+
+ if (mVideoSource != NULL) {
+ char tunnelAVDecode[PROPERTY_VALUE_MAX];
+ property_get("tunnel.audiovideo.decode",tunnelAVDecode,"0");
+ sys_prop_enabled = !strncmp("true", tunnelAVDecode, 4) || atoi(tunnelAVDecode);
+ if (sys_prop_enabled) {
+ ALOGD("Enable Tunnel Mode for A-V playback");
+ mIsTunnelAudio = true;
+ }
+ }
+ else {
+ ALOGI("Tunnel Mode Audio Enabled");
+ mIsTunnelAudio = true;
+ }
+ }
+ else
+ ALOGD("Normal Audio Playback");
+
+ if (isStreamingHTTP()) {
+ ALOGV("Streaming, force disable tunnel mode playback");
+ mIsTunnelAudio = false;
+ }
+ if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW) ||
+ (mIsTunnelAudio && (mTunnelAliveAP == 0))) {
+ ALOGD("Set Audio Track as Audio Source");
+ if(mIsTunnelAudio) {
+ mTunnelAliveAP++;
+ }
+#else
if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW)) {
+#endif
mAudioSource = mAudioTrack;
} else {
+#ifdef QCOM_ENHANCED_AUDIO
+ // For LPA Playback use the decoder without OMX layer
+ char *matchComponentName = NULL;
+ int64_t durationUs;
+ uint32_t flags = 0;
+ char lpaDecode[128];
+ uint32_t minDurationForLPA = LPA_MIN_DURATION_USEC_DEFAULT;
+ char minUserDefDuration[PROPERTY_VALUE_MAX];
+ property_get("lpa.decode",lpaDecode,"0");
+            property_get("lpa.min_duration",minUserDefDuration,"60000000");
+ minDurationForLPA = atoi(minUserDefDuration);
+ if(minDurationForLPA < LPA_MIN_DURATION_USEC_ALLOWED) {
+ ALOGE("LPAPlayer::Clip duration setting of less than 30sec not supported, defaulting to 60sec");
+ minDurationForLPA = LPA_MIN_DURATION_USEC_DEFAULT;
+ }
+ if (mAudioTrack->getFormat()->findInt64(kKeyDuration, &durationUs)) {
+ Mutex::Autolock autoLock(mMiscStateLock);
+ if (mDurationUs < 0 || durationUs > mDurationUs) {
+ mDurationUs = durationUs;
+ }
+ }
+ if ( mDurationUs > minDurationForLPA
+ && (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG) || !strcasecmp(mime,MEDIA_MIMETYPE_AUDIO_AAC))
+ && LPAPlayer::objectsAlive == 0 && mVideoSource == NULL && (strcmp("true",lpaDecode) == 0)
+ && (nchannels && (nchannels <= 2)) ) {
+ char nonOMXDecoder[128];
+ if(!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG)) {
+ ALOGD("matchComponentName is set to MP3Decoder %lld, mime %s",mDurationUs,mime);
+ property_get("use.non-omx.mp3.decoder",nonOMXDecoder,"0");
+ if((strcmp("true",nonOMXDecoder) == 0)) {
+ matchComponentName = (char *) "MP3Decoder";
+ }
+ } else if((!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC))) {
+ ALOGD("matchComponentName is set to AACDecoder %lld, mime %s",mDurationUs,mime);
+ property_get("use.non-omx.aac.decoder",nonOMXDecoder,"0");
+ if((strcmp("true",nonOMXDecoder) == 0)) {
+ matchComponentName = (char *) "AACDecoder";
+ } else {
+ matchComponentName = (char *) "OMX.google.aac.decoder";
+ }
+ }
+ flags |= OMXCodec::kSoftwareCodecsOnly;
+ }
+ mAudioSource = OMXCodec::Create(
+ mClient.interface(), mAudioTrack->getFormat(),
+ false, // createEncoder
+ mAudioTrack, matchComponentName, flags,NULL);
+#else
mAudioSource = OMXCodec::Create(
mClient.interface(), mAudioTrack->getFormat(),
false, // createEncoder
mAudioTrack);
+#endif
}
if (mAudioSource != NULL) {
@@ -1576,6 +1828,12 @@ void AwesomePlayer::finishSeekIfNecessary(int64_t videoTimeUs) {
mDrmManagerClient->setPlaybackStatus(mDecryptHandle,
Playback::START, videoTimeUs / 1000);
}
+
+ {
+ Mutex::Autolock autoLock(mStatsLock);
+ mStats.mLastSeekToTimeMs = mSeekTimeUs/1000;
+ logFirstFrame();
+ }
}
void AwesomePlayer::onVideoEvent() {
@@ -1588,6 +1846,14 @@ void AwesomePlayer::onVideoEvent() {
}
mVideoEventPending = false;
+ {
+ Mutex::Autolock autoLock(mStatsLock);
+ if(!mStats.mVeryFirstFrame && mSeeking == NO_SEEK){
+ mStats.mTotalTimeUs += getTimeOfDayUs() - mStats.mLastFrameUs;
+ }
+ mStats.mLastFrameUs = getTimeOfDayUs();
+ }
+
if (mSeeking != NO_SEEK) {
if (mVideoBuffer) {
mVideoBuffer->release();
@@ -1721,18 +1987,26 @@ void AwesomePlayer::onVideoEvent() {
modifyFlags(FIRST_FRAME, CLEAR);
mSinceLastDropped = 0;
mTimeSourceDeltaUs = ts->getRealTimeUs() - timeUs;
+
+ {
+ Mutex::Autolock autoLock(mStatsLock);
+ if(mStats.mVeryFirstFrame){
+ logFirstFrame();
+ mStats.mLastFrameUs = getTimeOfDayUs();
+ }
+ }
}
- int64_t realTimeUs, mediaTimeUs;
+ int64_t realTimeUs, mediaTimeUs, nowUs = 0, latenessUs = 0;
if (!(mFlags & AUDIO_AT_EOS) && mAudioPlayer != NULL
&& mAudioPlayer->getMediaTimeMapping(&realTimeUs, &mediaTimeUs)) {
mTimeSourceDeltaUs = realTimeUs - mediaTimeUs;
}
if (wasSeeking == SEEK_VIDEO_ONLY) {
- int64_t nowUs = ts->getRealTimeUs() - mTimeSourceDeltaUs;
+ nowUs = ts->getRealTimeUs() - mTimeSourceDeltaUs;
- int64_t latenessUs = nowUs - timeUs;
+ latenessUs = nowUs - timeUs;
ATRACE_INT("Video Lateness (ms)", latenessUs / 1E3);
@@ -1744,9 +2018,9 @@ void AwesomePlayer::onVideoEvent() {
if (wasSeeking == NO_SEEK) {
// Let's display the first frame after seeking right away.
- int64_t nowUs = ts->getRealTimeUs() - mTimeSourceDeltaUs;
+ nowUs = ts->getRealTimeUs() - mTimeSourceDeltaUs;
- int64_t latenessUs = nowUs - timeUs;
+ latenessUs = nowUs - timeUs;
ATRACE_INT("Video Lateness (ms)", latenessUs / 1E3);
@@ -1793,6 +2067,11 @@ void AwesomePlayer::onVideoEvent() {
{
Mutex::Autolock autoLock(mStatsLock);
++mStats.mNumVideoFramesDropped;
+ mStats.mConsecutiveFramesDropped++;
+ if (mStats.mConsecutiveFramesDropped == 1){
+ mStats.mCatchupTimeStart = mTimeSource->getRealTimeUs();
+ }
+ if(!(mFlags & AT_EOS)) logLate(timeUs,nowUs,latenessUs);
}
postVideoEvent_l();
@@ -1802,6 +2081,11 @@ void AwesomePlayer::onVideoEvent() {
if (latenessUs < -10000) {
// We're more than 10ms early.
+ logOnTime(timeUs,nowUs,latenessUs);
+ {
+ Mutex::Autolock autoLock(mStatsLock);
+ mStats.mConsecutiveFramesDropped = 0;
+ }
postVideoEvent_l(10000);
return;
}
@@ -1822,6 +2106,12 @@ void AwesomePlayer::onVideoEvent() {
notifyListener_l(MEDIA_INFO, MEDIA_INFO_RENDERING_START);
}
+ {
+ Mutex::Autolock autoLock(mStatsLock);
+ logOnTime(timeUs,nowUs,latenessUs);
+ mStats.mTotalFrames++;
+ mStats.mConsecutiveFramesDropped = 0;
+ }
}
mVideoBuffer->release();
@@ -2121,6 +2411,15 @@ status_t AwesomePlayer::finishSetDataSource_l() {
return UNKNOWN_ERROR;
}
}
+#ifdef STE_FM
+ } else if (!strncasecmp("fmradio://rx", mUri.string(), 12)) {
+ sniffedMIME = MEDIA_MIMETYPE_AUDIO_RAW;
+ dataSource = new FMRadioSource();
+ status_t err = dataSource->initCheck();
+ if (err != OK) {
+ return err;
+ }
+#endif
} else {
dataSource = DataSource::CreateFromURI(mUri.string(), &mUriHeaders);
}
@@ -2619,13 +2918,37 @@ status_t AwesomePlayer::dump(int fd, const Vector<String16> &args) const {
if ((ssize_t)i == mStats.mVideoTrackIndex) {
fprintf(out,
- " videoDimensions(%d x %d), "
- "numVideoFramesDecoded(%lld), "
- "numVideoFramesDropped(%lld)\n",
+ " videoDimensions(%d x %d)\n"
+ " Total Video Frames Decoded(%lld)\n"
+ " Total Video Frames Rendered(%lld)\n"
+ " Total Playback Duration(%lld ms)\n"
+ " numVideoFramesDropped(%lld)\n"
+ " Average Frames Per Second(%.4f)\n"
+ " Last Seek To Time(%lld ms)\n"
+ " Last Paused Time(%lld ms)\n"
+ " First Frame Latency (%lld ms)\n"
+ " Number of times AV Sync Lost(%u)\n"
+ " Max Video Ahead Time Delta(%u)\n"
+ " Max Video Behind Time Delta(%u)\n"
+ " Max Time Sync Loss(%u)\n"
+ " EOS(%d)\n"
+ " PLAYING(%d)\n",
mStats.mVideoWidth,
mStats.mVideoHeight,
mStats.mNumVideoFramesDecoded,
- mStats.mNumVideoFramesDropped);
+ mStats.mTotalFrames,
+ mStats.mTotalTimeUs/1000,
+ mStats.mNumVideoFramesDropped,
+ ((double)(mStats.mTotalFrames)*1E6)/((double)mStats.mTotalTimeUs),
+ mStats.mLastSeekToTimeMs,
+ mStats.mLastPausedTimeMs,
+ mStats.mFirstFrameLatencyUs/1000,
+ mStats.mNumTimesSyncLoss,
+ -mStats.mMaxEarlyDelta/1000,
+ mStats.mMaxLateDelta/1000,
+ mStats.mMaxTimeSyncLoss/1000,
+ (mFlags & AT_EOS) > 0,
+ (mFlags & PLAYING) > 0);
}
}
@@ -2659,4 +2982,80 @@ void AwesomePlayer::modifyFlags(unsigned value, FlagMode mode) {
}
}
+inline void AwesomePlayer::logFirstFrame() {
+ mStats.mFirstFrameLatencyUs = getTimeOfDayUs()-mStats.mFirstFrameLatencyStartUs;
+ mStats.mVeryFirstFrame = false;
+}
+
+inline void AwesomePlayer::logCatchUp(int64_t ts, int64_t clock, int64_t delta)
+{
+ if (mStats.mConsecutiveFramesDropped > 0) {
+ mStats.mNumTimesSyncLoss++;
+ if (mStats.mMaxTimeSyncLoss < (clock - mStats.mCatchupTimeStart) && clock > 0 && ts > 0) {
+ mStats.mMaxTimeSyncLoss = clock - mStats.mCatchupTimeStart;
+ }
+ }
+}
+
+inline void AwesomePlayer::logLate(int64_t ts, int64_t clock, int64_t delta)
+{
+ if (mStats.mMaxLateDelta < delta && clock > 0 && ts > 0) {
+ mStats.mMaxLateDelta = delta;
+ }
+}
+
+inline void AwesomePlayer::logOnTime(int64_t ts, int64_t clock, int64_t delta)
+{
+ bool needLogLate = false;
+ logCatchUp(ts, clock, delta);
+ if (delta <= 0) {
+ if ((-delta) > (-mStats.mMaxEarlyDelta) && clock > 0 && ts > 0) {
+ mStats.mMaxEarlyDelta = delta;
+ }
+ }
+ else {
+ needLogLate = true;
+ }
+
+ if(needLogLate) logLate(ts, clock, delta);
+}
+
+inline int64_t AwesomePlayer::getTimeOfDayUs() {
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+
+ return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
+}
+
+#ifdef USE_TUNNEL_MODE
+bool AwesomePlayer::inSupportedTunnelFormats(const char * mime) {
+ const char * tunnelFormats [ ] = {
+ MEDIA_MIMETYPE_AUDIO_MPEG,
+ MEDIA_MIMETYPE_AUDIO_AAC,
+#ifdef TUNNEL_MODE_SUPPORTS_AMRWB
+ MEDIA_MIMETYPE_AUDIO_AMR_WB,
+ MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS
+#endif
+ };
+
+ if (!mime) {
+ return false;
+ }
+
+ size_t len = sizeof(tunnelFormats)/sizeof(const char *);
+ for (size_t i = 0; i < len; i++) {
+ const char * tf = tunnelFormats[i];
+ if (!strncasecmp(mime, tf, strlen(tf))) {
+ if (strlen(mime) == strlen(tf)) { //to prevent a substring match
+ ALOGD("Tunnel playback supported for %s", tf);
+ return true;
+ }
+ }
+ }
+
+ ALOGW("Tunnel playback unsupported for %s", mime);
+ return false;
+}
+#endif
+
} // namespace android
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index efd7af7..b8b9152 100755
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -31,6 +31,10 @@
#include <utils/String8.h>
#include <cutils/properties.h>
+#ifdef USE_TI_CUSTOM_DOMX
+#include <OMX_TI_IVCommon.h>
+#endif
+
namespace android {
static const int64_t CAMERA_SOURCE_TIMEOUT_NS = 3000000000LL;
@@ -96,11 +100,20 @@ static int32_t getColorFormat(const char* colorFormat) {
}
if (!strcmp(colorFormat, CameraParameters::PIXEL_FORMAT_YUV420SP)) {
+#ifdef USE_SAMSUNG_COLORFORMAT
+ static const int OMX_SEC_COLOR_FormatNV12LPhysicalAddress = 0x7F000002;
+ return OMX_SEC_COLOR_FormatNV12LPhysicalAddress;
+#else
return OMX_COLOR_FormatYUV420SemiPlanar;
+#endif
}
if (!strcmp(colorFormat, CameraParameters::PIXEL_FORMAT_YUV422I)) {
+#if defined(TARGET_OMAP3) && defined(OMAP_ENHANCEMENT)
+ return OMX_COLOR_FormatCbYCrY;
+#else
return OMX_COLOR_FormatYCbYCr;
+#endif
}
if (!strcmp(colorFormat, CameraParameters::PIXEL_FORMAT_RGB565)) {
diff --git a/media/libstagefright/DataSource.cpp b/media/libstagefright/DataSource.cpp
index 9d0eea2..4ab602f 100644
--- a/media/libstagefright/DataSource.cpp
+++ b/media/libstagefright/DataSource.cpp
@@ -33,6 +33,9 @@
#include "include/OggExtractor.h"
#include "include/WAVExtractor.h"
#include "include/WVMExtractor.h"
+#ifdef QCOM_HARDWARE
+#include "include/ExtendedExtractor.h"
+#endif
#include "matroska/MatroskaExtractor.h"
@@ -122,6 +125,9 @@ void DataSource::RegisterDefaultSniffers() {
RegisterSniffer(SniffAAC);
RegisterSniffer(SniffMPEG2PS);
RegisterSniffer(SniffWVM);
+#ifdef QCOM_HARDWARE
+ RegisterSniffer(SniffExtendedExtractor);
+#endif
char value[PROPERTY_VALUE_MAX];
if (property_get("drm.service.enabled", value, NULL)
diff --git a/media/libstagefright/ExtendedExtractor.cpp b/media/libstagefright/ExtendedExtractor.cpp
new file mode 100644
index 0000000..8e0d5d7
--- /dev/null
+++ b/media/libstagefright/ExtendedExtractor.cpp
@@ -0,0 +1,110 @@
+/*Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ExtendedExtractor"
+#include <utils/Log.h>
+#include <dlfcn.h> // for dlopen/dlclose
+
+#include "include/ExtendedExtractor.h"
+
+static const char* EXTENDED_PARSER_LIB = "libExtendedExtractor.so";
+
+namespace android {
+
+// Lazily dlopen() the vendor parser library exactly once per process.
+// Returns the cached handle, or NULL if the library could not be loaded
+// (the open is never retried after the first failure).
+// NOTE(review): function-local statics make this not thread-safe pre-C++11
+// semantics; presumably callers are serialized — confirm.
+void* ExtendedParserLib() {
+ static void* extendedParserLib = NULL;
+ static bool alreadyTriedToOpenParsers = false;
+
+ if(!alreadyTriedToOpenParsers) {
+ alreadyTriedToOpenParsers = true;
+
+ extendedParserLib = ::dlopen(EXTENDED_PARSER_LIB, RTLD_LAZY);
+
+ if(extendedParserLib == NULL) {
+ ALOGV("Failed to open EXTENDED_PARSER_LIB, dlerror = %s \n", dlerror());
+ }
+ }
+
+ return extendedParserLib;
+}
+
+// Instantiate an extractor via the factory symbol exported by the
+// dynamically loaded parser library. The dlsym lookup is attempted once
+// and cached. Returns NULL when the library/symbol is unavailable or the
+// factory itself fails.
+// NOTE(review): when the library never loads, the 'alreadyTried' flag is
+// never set, so the ALOGE below fires on every call — harmless but noisy.
+MediaExtractor* ExtendedExtractor::CreateExtractor(const sp<DataSource> &source, const char *mime) {
+ static MediaExtractorFactory mediaFactoryFunction = NULL;
+ static bool alreadyTriedToFindFactoryFunction = false;
+
+ MediaExtractor* extractor = NULL;
+
+ if(!alreadyTriedToFindFactoryFunction) {
+
+ void *extendedParserLib = ExtendedParserLib();
+ if (extendedParserLib != NULL) {
+
+ mediaFactoryFunction = (MediaExtractorFactory) dlsym(extendedParserLib, MEDIA_CREATE_EXTRACTOR);
+ alreadyTriedToFindFactoryFunction = true;
+ }
+ }
+
+ if(mediaFactoryFunction==NULL) {
+ ALOGE(" dlsym for ExtendedExtractor factory function failed, dlerror = %s \n", dlerror());
+ return NULL;
+ }
+
+ extractor = mediaFactoryFunction(source, mime);
+ if(extractor==NULL) {
+ ALOGE(" ExtendedExtractor failed to instantiate extractor \n");
+ }
+
+ return extractor;
+}
+
+// DataSource sniffer hook: delegates format detection to the sniffer
+// function exported by the vendor parser library. Returns false when the
+// library or symbol is missing, or when the delegated sniff fails.
+bool SniffExtendedExtractor(const sp<DataSource> &source, String8 *mimeType,
+ float *confidence,sp<AMessage> *meta) {
+ void *extendedParserLib = ExtendedParserLib();
+ bool retVal = false;
+ if (extendedParserLib != NULL) {
+ ExtendedExtractorSniffers extendedExtractorSniffers=
+ (ExtendedExtractorSniffers) dlsym(extendedParserLib, EXTENDED_EXTRACTOR_SNIFFERS);
+
+ if(extendedExtractorSniffers == NULL) {
+ ALOGE(" dlsym for extendedExtractorSniffers function failed, dlerror = %s \n", dlerror());
+ return retVal;
+ }
+
+ retVal = extendedExtractorSniffers(source, mimeType, confidence, meta);
+
+ if(!retVal) {
+ ALOGV("ExtendedExtractor:: ExtendedExtractorSniffers Failed");
+ }
+ }
+ return retVal;
+}
+
+} // namespace android
+
+
diff --git a/media/libstagefright/ExtendedWriter.cpp b/media/libstagefright/ExtendedWriter.cpp
new file mode 100644
index 0000000..7c8b08e
--- /dev/null
+++ b/media/libstagefright/ExtendedWriter.cpp
@@ -0,0 +1,390 @@
+/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * Not a Contribution, Apache license notifications and license are retained
+ * for attribution purposes only.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/stagefright/ExtendedWriter.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MetaData.h>
+#include <media/mediarecorder.h>
+#include <system/audio.h>
+
+#include <sys/prctl.h>
+#include <sys/resource.h>
+
+#include <arpa/inet.h>
+#include <QCMediaDefs.h>
+
+#undef LOG_TAG
+#define LOG_TAG "ExtendedWriter"
+
+namespace android {
+
+// Open the output file by path; mInitCheck records whether fopen succeeded.
+ExtendedWriter::ExtendedWriter(const char *filename)
+ : mFile(fopen(filename, "wb")),
+ mInitCheck(mFile != NULL ? OK : NO_INIT),
+ mStarted(false),
+ mPaused(false),
+ mResumed(false),
+ mOffset(0) {
+}
+
+// Wrap an already-open file descriptor; fdopen takes ownership of fd.
+ExtendedWriter::ExtendedWriter(int fd)
+ : mFile(fdopen(fd, "wb")),
+ mInitCheck(mFile != NULL ? OK : NO_INIT),
+ mStarted(false),
+ mPaused(false),
+ mResumed(false),
+ mOffset(0) {
+}
+
+// Stop the writer thread if still running, then close the file.
+// Note threadFunc() closes mFile itself on normal completion, so the
+// fclose here only fires when start() was never reached that far.
+ExtendedWriter::~ExtendedWriter() {
+ if (mStarted) {
+ stop();
+ }
+
+ if (mFile != NULL) {
+ fclose(mFile);
+ mFile = NULL;
+ }
+}
+
+// OK when the output file opened successfully, NO_INIT otherwise.
+status_t ExtendedWriter::initCheck() const {
+ return mInitCheck;
+}
+
+// Attach the single audio source. Only QCELP and EVRC mono/8kHz tracks are
+// accepted (CHECKs abort on other channel counts / sample rates).
+// Returns UNKNOWN_ERROR if a source is already set or the mime is
+// unsupported.
+status_t ExtendedWriter::addSource(const sp<MediaSource> &source) {
+ if (mInitCheck != OK) {
+ ALOGE("Init Check not OK, return");
+ return mInitCheck;
+ }
+
+ if (mSource != NULL) {
+ ALOGE("A source already exists, return");
+ return UNKNOWN_ERROR;
+ }
+
+ sp<MetaData> meta = source->getFormat();
+
+ const char *mime;
+ CHECK(meta->findCString(kKeyMIMEType, &mime));
+
+ if ( !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_QCELP)) {
+ mFormat = AUDIO_FORMAT_QCELP;
+ } else if ( !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_EVRC)) {
+ mFormat = AUDIO_FORMAT_EVRC;
+ }
+ else {
+ return UNKNOWN_ERROR;
+ }
+
+ // QCP container supports only mono 8 kHz for these codecs.
+ int32_t channelCount;
+ int32_t sampleRate;
+ CHECK(meta->findInt32(kKeyChannelCount, &channelCount));
+ CHECK_EQ(channelCount, 1);
+ CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
+ CHECK_EQ(sampleRate, 8000);
+
+ mSource = source;
+
+ return OK;
+}
+
+// Start (or resume) recording. Reserves placeholder space for the QCP/EVRC
+// header at the front of the file -- it is overwritten with real values by
+// writeQCPHeader()/writeEVRCHeader() when the writer thread finishes --
+// then starts the source and spawns the joinable writer thread.
+// Returns mInitCheck / source errors; OK if already started.
+status_t ExtendedWriter::start(MetaData *params) {
+ if (mInitCheck != OK) {
+ ALOGE("Init Check not OK, return");
+ return mInitCheck;
+ }
+
+ if (mSource == NULL) {
+ ALOGE("NULL Source");
+ return UNKNOWN_ERROR;
+ }
+
+ if (mStarted && mPaused) {
+ // Resuming from pause: no new thread, threadFunc is still running.
+ mPaused = false;
+ mResumed = true;
+ return OK;
+ } else if (mStarted) {
+ ALOGE("Already started, return");
+ return OK;
+ }
+
+ //space for header;
+ size_t headerSize = sizeof( struct QCPEVRCHeader );
+ uint8_t * header = (uint8_t *)malloc(headerSize);
+ if (header == NULL) {
+ ALOGE("Failed to allocate header placeholder");
+ return NO_MEMORY;
+ }
+ memset( header, '?', headerSize);
+ fwrite( header, 1, headerSize, mFile );
+ mOffset += headerSize;
+ // malloc'd memory must be released with free(), not delete.
+ free(header);
+
+ status_t err = mSource->start();
+
+ if (err != OK) {
+ return err;
+ }
+
+ pthread_attr_t attr;
+ pthread_attr_init(&attr);
+ pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+
+ mReachedEOS = false;
+ mDone = false;
+
+ pthread_create(&mThread, &attr, ThreadWrapper, this);
+ pthread_attr_destroy(&attr);
+
+ mStarted = true;
+
+ return OK;
+}
+
+// Flag a pause; threadFunc keeps reading but discards buffers while
+// mPaused is set. No-op when never started.
+status_t ExtendedWriter::pause() {
+ if (!mStarted) {
+ return OK;
+ }
+ mPaused = true;
+ return OK;
+}
+
+// Signal the writer thread to finish, join it, then stop the source.
+// The thread's exit status is smuggled through pthread_join's void*.
+// A source ERROR_END_OF_STREAM is treated as success.
+status_t ExtendedWriter::stop() {
+ if (!mStarted) {
+ return OK;
+ }
+
+ mDone = true;
+
+ void *dummy;
+ pthread_join(mThread, &dummy);
+
+ status_t err = (status_t) dummy;
+ {
+ status_t status = mSource->stop();
+ if (err == OK &&
+ (status != OK && status != ERROR_END_OF_STREAM)) {
+ err = status;
+ }
+ }
+
+ mStarted = false;
+ return err;
+}
+
+// True when the estimated output size reached the configured cap
+// (0 means unlimited).
+bool ExtendedWriter::exceedsFileSizeLimit() {
+ if (mMaxFileSizeLimitBytes == 0) {
+ return false;
+ }
+ return mEstimatedSizeBytes >= mMaxFileSizeLimitBytes;
+}
+
+// True when the estimated duration reached the configured cap
+// (0 means unlimited).
+bool ExtendedWriter::exceedsFileDurationLimit() {
+ if (mMaxFileDurationLimitUs == 0) {
+ return false;
+ }
+ return mEstimatedDurationUs >= mMaxFileDurationLimitUs;
+}
+
+// static
+// pthread trampoline: forwards to threadFunc() and returns its status
+// through the thread exit value (read back in stop()).
+void *ExtendedWriter::ThreadWrapper(void *me) {
+ return (void *) static_cast<ExtendedWriter *>(me)->threadFunc();
+}
+
+// Writer thread main loop: pulls buffers from the source, tracks
+// size/duration limits (posting recorder events when hit), adjusts
+// timestamps across pause/resume, and appends raw frames to mFile.
+// On exit it back-patches the QCP/EVRC header and closes the file.
+status_t ExtendedWriter::threadFunc() {
+ mEstimatedDurationUs = 0;
+ mEstimatedSizeBytes = 0;
+ bool stoppedPrematurely = true;
+ int64_t previousPausedDurationUs = 0;
+ int64_t maxTimestampUs = 0;
+ status_t err = OK;
+
+ pid_t tid = gettid();
+ androidSetThreadPriority(tid, ANDROID_PRIORITY_AUDIO);
+ prctl(PR_SET_NAME, (unsigned long)"ExtendedWriter", 0, 0, 0);
+ while (!mDone) {
+ MediaBuffer *buffer;
+ err = mSource->read(&buffer);
+
+ if (err != OK) {
+ break;
+ }
+
+ // While paused, drain and discard source buffers.
+ if (mPaused) {
+ buffer->release();
+ buffer = NULL;
+ continue;
+ }
+
+ mEstimatedSizeBytes += buffer->range_length();
+ if (exceedsFileSizeLimit()) {
+ buffer->release();
+ buffer = NULL;
+ notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_MAX_FILESIZE_REACHED, 0);
+ break;
+ }
+
+ int64_t timestampUs;
+ CHECK(buffer->meta_data()->findInt64(kKeyTime, &timestampUs));
+ if (timestampUs > mEstimatedDurationUs) {
+ mEstimatedDurationUs = timestampUs;
+ }
+ if (mResumed) {
+ // Fold the paused gap out of the timeline (20 ms = one frame,
+ // presumably -- confirm against the codec frame duration).
+ previousPausedDurationUs += (timestampUs - maxTimestampUs - 20000);
+ mResumed = false;
+ }
+ timestampUs -= previousPausedDurationUs;
+ ALOGV("time stamp: %lld, previous paused duration: %lld",
+ timestampUs, previousPausedDurationUs);
+ if (timestampUs > maxTimestampUs) {
+ maxTimestampUs = timestampUs;
+ }
+
+ if (exceedsFileDurationLimit()) {
+ buffer->release();
+ buffer = NULL;
+ notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_MAX_DURATION_REACHED, 0);
+ break;
+ }
+ ssize_t n = fwrite(
+ (const uint8_t *)buffer->data() + buffer->range_offset(),
+ 1,
+ buffer->range_length(),
+ mFile);
+ mOffset += n;
+
+ // Short write: stop recording (disk full, I/O error, ...).
+ if (n < (ssize_t)buffer->range_length()) {
+ buffer->release();
+ buffer = NULL;
+
+ break;
+ }
+
+ // XXX: How to tell it is stopped prematurely?
+ if (stoppedPrematurely) {
+ stoppedPrematurely = false;
+ }
+
+ buffer->release();
+ buffer = NULL;
+ }
+
+ if (stoppedPrematurely) {
+ notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_TRACK_INFO_COMPLETION_STATUS, UNKNOWN_ERROR);
+ }
+
+ // Rewind and write the real container header now that sizes are known.
+ if ( mFormat == AUDIO_FORMAT_QCELP ) {
+ writeQCPHeader( );
+ }
+ else if ( mFormat == AUDIO_FORMAT_EVRC ) {
+ writeEVRCHeader( );
+ }
+
+ fflush(mFile);
+ fclose(mFile);
+ mFile = NULL;
+ mReachedEOS = true;
+ if (err == ERROR_END_OF_STREAM || (err == -ETIMEDOUT)) {
+ return OK;
+ }
+ return err;
+}
+
+// True once threadFunc() has finished (set just before it returns).
+bool ExtendedWriter::reachedEOS() {
+ return mReachedEOS;
+}
+
+// Seek to file start and overwrite the placeholder with a QCELP-13K QCP
+// header. data1..data4 form the QCELP codec GUID {5E7F6D41-B115-11D0-
+// BA91-00805FB4B97E}; sizes are derived from mOffset accumulated during
+// recording. Assumes fixed 35-byte full-rate packets.
+status_t ExtendedWriter::writeQCPHeader() {
+ /* Common part */
+ struct QCPEVRCHeader header = {
+ {'R', 'I', 'F', 'F'}, 0, {'Q', 'L', 'C', 'M'}, /* Riff */
+ {'f', 'm', 't', ' '}, 150, 1, 0, 0, 0, 0,{0}, 0, {0},0,0,160,8000,16,0,{0},{0},{0}, /* Fmt */
+ {'v','r','a','t'}, 0, 0, 0, /* Vrat */
+ {'d','a','t','a'},0 /* Data */
+ };
+
+ fseeko(mFile, 0, SEEK_SET);
+ header.s_riff = (mOffset - 8);
+ header.data1 = (0x5E7F6D41);
+ header.data2 = (0xB115);
+ header.data3 = (0x11D0);
+ header.data4[0] = 0xBA;
+ header.data4[1] = 0x91;
+ header.data4[2] = 0x00;
+ header.data4[3] = 0x80;
+ header.data4[4] = 0x5F;
+ header.data4[5] = 0xB4;
+ header.data4[6] = 0xB9;
+ header.data4[7] = 0x7E;
+ header.ver = (0x0002);
+ memcpy(header.name, "Qcelp 13K", 9);
+ header.abps = (13000);
+ header.bytes_per_pkt = (35);
+ header.vr_num_of_rates = 5;
+ header.vr_bytes_per_pkt[0] = (0x0422);
+ header.vr_bytes_per_pkt[1] = (0x0310);
+ header.vr_bytes_per_pkt[2] = (0x0207);
+ header.vr_bytes_per_pkt[3] = (0x0103);
+ header.s_vrat = (0x00000008);
+ header.v_rate = (0x00000001);
+ header.size_in_pkts = (mOffset - sizeof( struct QCPEVRCHeader ))/ header.bytes_per_pkt;
+ header.s_data = mOffset - sizeof( struct QCPEVRCHeader );
+ fwrite( &header, 1, sizeof( struct QCPEVRCHeader ), mFile );
+ return OK;
+}
+
+// Seek to file start and overwrite the placeholder with an EVRC QCP
+// header. data1..data4 form the EVRC codec GUID {E689D48D-9076-46B5-
+// 91EF-736A5100CEB4}; sizes derived from mOffset. Assumes fixed
+// 23-byte full-rate packets.
+status_t ExtendedWriter::writeEVRCHeader() {
+ /* Common part */
+ struct QCPEVRCHeader header = {
+ {'R', 'I', 'F', 'F'}, 0, {'Q', 'L', 'C', 'M'}, /* Riff */
+ {'f', 'm', 't', ' '}, 150, 1, 0, 0, 0, 0,{0}, 0, {0},0,0,160,8000,16,0,{0},{0},{0}, /* Fmt */
+ {'v','r','a','t'}, 0, 0, 0, /* Vrat */
+ {'d','a','t','a'},0 /* Data */
+ };
+
+ fseeko(mFile, 0, SEEK_SET);
+ header.s_riff = (mOffset - 8);
+ header.data1 = (0xe689d48d);
+ header.data2 = (0x9076);
+ header.data3 = (0x46b5);
+ header.data4[0] = 0x91;
+ header.data4[1] = 0xef;
+ header.data4[2] = 0x73;
+ header.data4[3] = 0x6a;
+ header.data4[4] = 0x51;
+ header.data4[5] = 0x00;
+ header.data4[6] = 0xce;
+ header.data4[7] = 0xb4;
+ header.ver = (0x0001);
+ memcpy(header.name, "TIA IS-127 Enhanced Variable Rate Codec, Speech Service Option 3", 64);
+ header.abps = (9600);
+ header.bytes_per_pkt = (23);
+ header.vr_num_of_rates = 4;
+ header.vr_bytes_per_pkt[0] = (0x0416);
+ header.vr_bytes_per_pkt[1] = (0x030a);
+ header.vr_bytes_per_pkt[2] = (0x0200);
+ header.vr_bytes_per_pkt[3] = (0x0102);
+ header.s_vrat = (0x00000008);
+ header.v_rate = (0x00000001);
+ header.size_in_pkts = (mOffset - sizeof( struct QCPEVRCHeader )) / header.bytes_per_pkt;
+ header.s_data = mOffset - sizeof( struct QCPEVRCHeader );
+ fwrite( &header, 1, sizeof( struct QCPEVRCHeader ), mFile );
+ return OK;
+}
+
+
+} // namespace android
diff --git a/media/libstagefright/FMRadioSource.cpp b/media/libstagefright/FMRadioSource.cpp
new file mode 100644
index 0000000..4229f23
--- /dev/null
+++ b/media/libstagefright/FMRadioSource.cpp
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Author: Stefan Ekenberg (stefan.ekenberg@stericsson.com) for ST-Ericsson
+ */
+
+#define LOG_TAG "FMRadioSource"
+#include <utils/Log.h>
+
+#include <media/stagefright/FMRadioSource.h>
+#include <media/AudioSystem.h>
+#include <private/media/AudioTrackShared.h>
+#include <cutils/compiler.h>
+
+namespace android {
+
+static const int kSampleRate = 48000;
+static const audio_format_t kAudioFormat = AUDIO_FORMAT_PCM_16_BIT;
+static const audio_channel_mask_t kChannelMask = AUDIO_CHANNEL_IN_STEREO;
+static const int kBufferTimeoutMs = 3000;
+
+// Acquire the FM-radio RX audio input, compute the minimum record frame
+// count, and open an IAudioRecord for it. Any failure leaves the error in
+// mInitCheck for callers to read via initCheck().
+FMRadioSource::FMRadioSource()
+ : mInitCheck(NO_INIT),
+ mStarted(false),
+ mSessionId(AudioSystem::newAudioSessionId()) {
+
+ // get FM Radio RX input
+ audio_io_handle_t input = AudioSystem::getInput(AUDIO_SOURCE_FM_RADIO_RX,
+ kSampleRate,
+ kAudioFormat,
+ kChannelMask,
+ mSessionId);
+ if (input == 0) {
+ ALOGE("Could not get audio input for FM Radio source");
+ mInitCheck = UNKNOWN_ERROR;
+ return;
+ }
+
+ // get frame count
+ int frameCount = 0;
+ status_t status = AudioRecord::getMinFrameCount(&frameCount, kSampleRate,
+ kAudioFormat, popcount(kChannelMask));
+ if (status != NO_ERROR) {
+ mInitCheck = status;
+ return;
+ }
+
+ // create the IAudioRecord
+ status = openRecord(frameCount, input);
+ if (status != NO_ERROR) {
+ mInitCheck = status;
+ return;
+ }
+
+ // Hold the session id for the lifetime of this source (released in dtor).
+ AudioSystem::acquireAudioSessionId(mSessionId);
+
+ mInitCheck = OK;
+ return;
+}
+
+// Release the audio session acquired in the constructor.
+FMRadioSource::~FMRadioSource() {
+ AudioSystem::releaseAudioSessionId(mSessionId);
+}
+
+// OK when construction fully succeeded; otherwise the first error hit.
+status_t FMRadioSource::initCheck() const {
+ return mInitCheck;
+}
+
+// DataSource read: starts the record track on first call, then copies up
+// to 'size' bytes of captured PCM into 'data'. Returns bytes copied, or 0
+// on start/obtain failure.
+// NOTE(review): 'offset' is ignored -- this is a live capture stream with
+// no random access, presumably by design.
+ssize_t FMRadioSource::readAt(off64_t offset, void *data, size_t size) {
+ Buffer audioBuffer;
+
+ if (!mStarted) {
+ status_t err = mAudioRecord->start(AudioSystem::SYNC_EVENT_NONE, 0);
+ if (err == OK) {
+ mStarted = true;
+ } else {
+ ALOGE("Failed to start audio source");
+ return 0;
+ }
+ }
+
+ // acquire a strong reference on the IAudioRecord and IMemory so that they cannot be destroyed
+ // while we are accessing the cblk
+ sp<IAudioRecord> audioRecord = mAudioRecord;
+ sp<IMemory> iMem = mCblkMemory;
+ audio_track_cblk_t* cblk = mCblk;
+
+ audioBuffer.frameCount = size / cblk->frameSize;
+
+ status_t err = obtainBuffer(&audioBuffer);
+ if (err != NO_ERROR) {
+ ALOGE("Error obtaining an audio buffer, giving up (err:%d).", err);
+ return 0;
+ }
+
+ // Copy out and advance the shared-buffer read position.
+ memcpy(data, audioBuffer.data, audioBuffer.size);
+ mCblk->stepUser(audioBuffer.frameCount);
+
+ return audioBuffer.size;
+}
+
+// Live stream has no fixed length; report size 0.
+status_t FMRadioSource::getSize(off64_t *size) {
+ *size = 0;
+ return OK;
+}
+
+// -------------------------------------------------------------------------
+
+// Create the IAudioRecord directly through AudioFlinger and map its
+// shared control block (cblk). The buffer area starts immediately after
+// the cblk struct in the shared memory region.
+status_t FMRadioSource::openRecord(int frameCount, audio_io_handle_t input)
+{
+ status_t status;
+ const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
+ if (audioFlinger == 0) {
+ return NO_INIT;
+ }
+
+ sp<IAudioRecord> record = audioFlinger->openRecord(getpid(), input,
+ kSampleRate,
+ kAudioFormat,
+ kChannelMask,
+ frameCount,
+ IAudioFlinger::TRACK_DEFAULT,
+ gettid(),
+ &mSessionId,
+ &status);
+
+ if (record == 0) {
+ ALOGE("AudioFlinger could not create record track, status: %d", status);
+ return status;
+ }
+
+ sp<IMemory> cblk = record->getCblk();
+ if (cblk == 0) {
+ ALOGE("Could not get control block");
+ return NO_INIT;
+ }
+ mAudioRecord = record;
+ mCblkMemory = cblk;
+ mCblk = static_cast<audio_track_cblk_t*>(cblk->pointer());
+ mCblk->buffers = (char*)mCblk + sizeof(audio_track_cblk_t);
+ // Clear the direction bit: this cblk is used for recording (input).
+ android_atomic_and(~CBLK_DIRECTION_MSK, &mCblk->flags);
+ return NO_ERROR;
+}
+
+// Block (up to kBufferTimeoutMs per wait) until captured frames are
+// available in the shared cblk, then fill 'audioBuffer' with a view onto
+// the largest contiguous run available, clamped to the request and to the
+// end of the circular buffer. Returns TIMED_OUT if no data arrives.
+status_t FMRadioSource::obtainBuffer(Buffer* audioBuffer)
+{
+ status_t result = NO_ERROR;
+ uint32_t framesReq = audioBuffer->frameCount;
+
+ audioBuffer->frameCount = 0;
+ audioBuffer->size = 0;
+
+ mCblk->lock.lock();
+ uint32_t framesReady = mCblk->framesReady();
+ if (framesReady == 0) {
+ do {
+ result = mCblk->cv.waitRelative(mCblk->lock, milliseconds(kBufferTimeoutMs));
+ if (CC_UNLIKELY(result != NO_ERROR)) {
+ ALOGE("obtainBuffer timed out (is the CPU pegged?) "
+ "user=%08x, server=%08x", mCblk->user, mCblk->server);
+ mCblk->lock.unlock();
+ return TIMED_OUT;
+ }
+
+ framesReady = mCblk->framesReady();
+ } while (framesReady == 0);
+ }
+ mCblk->lock.unlock();
+
+ if (framesReq > framesReady) {
+ framesReq = framesReady;
+ }
+
+ // Do not wrap past the end of the circular buffer in one chunk.
+ uint32_t u = mCblk->user;
+ uint32_t bufferEnd = mCblk->userBase + mCblk->frameCount;
+
+ if (framesReq > bufferEnd - u) {
+ framesReq = bufferEnd - u;
+ }
+
+ audioBuffer->frameCount = framesReq;
+ audioBuffer->size = framesReq * mCblk->frameSize;
+ audioBuffer->data = (int8_t*)mCblk->buffer(u);
+
+ return NO_ERROR;
+}
+
+} // namespace android
diff --git a/media/libstagefright/LPAPlayer.cpp b/media/libstagefright/LPAPlayer.cpp
new file mode 100755
index 0000000..1855804
--- /dev/null
+++ b/media/libstagefright/LPAPlayer.cpp
@@ -0,0 +1,833 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ * Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
+ * Not a Contribution. Apache license notifications and license are retained
+ * for attribution purposes only.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "LPAPlayer"
+
+#include <utils/Log.h>
+#include <utils/threads.h>
+
+#include <signal.h>
+#include <sys/prctl.h>
+#include <sys/resource.h>
+#include <sys/poll.h>
+#include <sys/eventfd.h>
+#include <binder/IPCThreadState.h>
+#include <media/AudioTrack.h>
+
+#include <media/stagefright/LPAPlayer.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/MediaErrors.h>
+
+#include <hardware_legacy/power.h>
+
+#include <linux/unistd.h>
+
+#include "include/AwesomePlayer.h"
+#include <powermanager/PowerManager.h>
+
+static const char mName[] = "LPAPlayer";
+
+#define MEM_BUFFER_SIZE 524288
+#define MEM_BUFFER_COUNT 2
+
+#define PCM_FORMAT 2
+#define NUM_FDS 2
+namespace android {
+int LPAPlayer::objectsAlive = 0;
+bool LPAPlayer::mLpaInProgress = false;
+
+// Construct the low-power-audio player: initialize playback state, start
+// the event queue used for the pause-timeout event, and register a client
+// with AudioFlinger so A2DP output changes are delivered to this player.
+// 'initCheck' is always set true here (registration is assumed to work).
+LPAPlayer::LPAPlayer(
+ const sp<MediaPlayerBase::AudioSink> &audioSink, bool &initCheck,
+ AwesomePlayer *observer)
+:AudioPlayer(audioSink,observer),
+mPositionTimeMediaUs(-1),
+mPositionTimeRealUs(-1),
+mInternalSeeking(false),
+mStarted(false),
+mA2DPEnabled(false),
+mSampleRate(0),
+mLatencyUs(0),
+mFrameSize(0),
+mNumFramesPlayed(0),
+mNumFramesPlayedSysTimeUs(0),
+mInputBuffer(NULL),
+mSeeking(false),
+mReachedEOS(false),
+mReachedOutputEOS(false),
+mFinalStatus(OK),
+mSeekTimeUs(0),
+mPauseTime(0),
+mIsFirstBuffer(false),
+mFirstBufferResult(OK),
+mFirstBuffer(NULL),
+mAudioSink(audioSink),
+mObserver(observer),
+mTrackType(TRACK_NONE){
+ ALOGV("LPAPlayer::LPAPlayer() ctor");
+ objectsAlive++;
+ mLpaInProgress = true;
+ mTimeStarted = 0;
+ mTimePlayed = 0;
+ numChannels =0;
+ mPaused = false;
+ mIsA2DPEnabled = false;
+ mAudioFlinger = NULL;
+ AudioFlingerClient = NULL;
+ /* Initialize Suspend/Resume related variables */
+ mQueue.start();
+ mQueueStarted = true;
+ mPauseEvent = new TimedEvent(this, &LPAPlayer::onPauseTimeOut);
+ mPauseEventPending = false;
+ getAudioFlinger();
+ ALOGV("Registering client with AudioFlinger");
+ mAudioFlinger->registerClient(AudioFlingerClient);
+
+ mIsAudioRouted = false;
+
+ initCheck = true;
+
+}
+
+// Tear down: stop the event queue, reset playback state (joins worker
+// threads, closes the sink, releases buffers), and deregister the
+// AudioFlinger client registered in the constructor.
+LPAPlayer::~LPAPlayer() {
+ ALOGV("LPAPlayer::~LPAPlayer()");
+ if (mQueueStarted) {
+ mQueue.stop();
+ }
+
+ reset();
+ if (mAudioFlinger != NULL) {
+ mAudioFlinger->deregisterClient(AudioFlingerClient);
+ }
+ objectsAlive--;
+ mLpaInProgress = false;
+
+}
+
+// Obtain (and cache) the AudioFlinger binder interface, blocking in
+// 0.5 s retries until the service is published. Also creates the
+// death-notification client and links it to the binder.
+void LPAPlayer::getAudioFlinger() {
+ Mutex::Autolock _l(AudioFlingerLock);
+
+ if ( mAudioFlinger.get() == 0 ) {
+ sp<IServiceManager> sm = defaultServiceManager();
+ sp<IBinder> binder;
+ do {
+ binder = sm->getService(String16("media.audio_flinger"));
+ if ( binder != 0 )
+ break;
+ ALOGW("AudioFlinger not published, waiting...");
+ usleep(500000); // 0.5 s
+ } while ( true );
+ if ( AudioFlingerClient == NULL ) {
+ AudioFlingerClient = new AudioFlingerLPAdecodeClient(this);
+ }
+
+ binder->linkToDeath(AudioFlingerClient);
+ mAudioFlinger = interface_cast<IAudioFlinger>(binder);
+ }
+ ALOGE_IF(mAudioFlinger==0, "no AudioFlinger!?");
+}
+
+// Client callback object: keeps a raw back-pointer to the owning
+// LPAPlayer so binderDied/ioConfigChanged can reach it.
+LPAPlayer::AudioFlingerLPAdecodeClient::AudioFlingerLPAdecodeClient(void *obj)
+{
+ ALOGV("LPAPlayer::AudioFlingerLPAdecodeClient::AudioFlingerLPAdecodeClient");
+ pBaseClass = (LPAPlayer*)obj;
+}
+
+// AudioFlinger process died: drop the cached interface so the next
+// getAudioFlinger() call reconnects.
+void LPAPlayer::AudioFlingerLPAdecodeClient::binderDied(const wp<IBinder>& who) {
+ Mutex::Autolock _l(pBaseClass->AudioFlingerLock);
+
+ pBaseClass->mAudioFlinger.clear();
+ ALOGW("AudioFlinger server died!");
+}
+
+// AudioFlinger I/O config callback. Only A2DP_OUTPUT_STATE is handled:
+// ioHandle == -1 means A2DP went away, anything else means it appeared.
+// On a state flip while playing, handleA2DPSwitch() wakes the decoder.
+void LPAPlayer::AudioFlingerLPAdecodeClient::ioConfigChanged(int event, audio_io_handle_t ioHandle, const void *param2) {
+ ALOGV("ioConfigChanged() event %d", event);
+
+ // Early-out keeps the switch below single-case.
+ if (event != AudioSystem::A2DP_OUTPUT_STATE) {
+ return;
+ }
+
+ switch ( event ) {
+ case AudioSystem::A2DP_OUTPUT_STATE:
+ {
+ ALOGV("ioConfigChanged() A2DP_OUTPUT_STATE iohandle is %d with A2DPEnabled in %d", ioHandle, pBaseClass->mIsA2DPEnabled);
+ if ( -1 == ioHandle ) {
+ if ( pBaseClass->mIsA2DPEnabled ) {
+ pBaseClass->mIsA2DPEnabled = false;
+ if (pBaseClass->mStarted) {
+ pBaseClass->handleA2DPSwitch();
+ }
+ ALOGV("ioConfigChanged:: A2DP Disabled");
+ }
+ } else {
+ if ( !pBaseClass->mIsA2DPEnabled ) {
+
+ pBaseClass->mIsA2DPEnabled = true;
+ if (pBaseClass->mStarted) {
+ pBaseClass->handleA2DPSwitch();
+ }
+
+ ALOGV("ioConfigChanged:: A2DP Enabled");
+ }
+ }
+ }
+ break;
+ }
+ ALOGV("ioConfigChanged Out");
+
+}
+
+// Wake the decoder thread so it re-evaluates the A2DP routing state.
+void LPAPlayer::handleA2DPSwitch() {
+ pthread_cond_signal(&decoder_cv);
+}
+
+// Attach the decoded-audio source; may only be set once (CHECK aborts on
+// a second call).
+void LPAPlayer::setSource(const sp<MediaSource> &source) {
+ CHECK(mSource == NULL);
+ ALOGV("Setting source from LPA Player");
+ mSource = source;
+}
+
+// Start playback: optionally start the source, spawn the worker threads,
+// perform the first read (handling an initial INFO_FORMAT_CHANGED),
+// extract sample rate / channel count / channel mask from the source
+// format (raw PCM only), open the audio sink, and wake the decoder.
+status_t LPAPlayer::start(bool sourceAlreadyStarted) {
+ CHECK(!mStarted);
+ CHECK(mSource != NULL);
+
+ ALOGV("start: sourceAlreadyStarted %d", sourceAlreadyStarted);
+ //Check if the source is started, start it
+ status_t err;
+ if (!sourceAlreadyStarted) {
+ err = mSource->start();
+
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ //Create decoder and a2dp notification thread and initialize all the
+ //mutexes and coditional variables
+ createThreads();
+ ALOGV("All Threads Created.");
+
+ // We allow an optional INFO_FORMAT_CHANGED at the very beginning
+ // of playback, if there is one, getFormat below will retrieve the
+ // updated format, if there isn't, we'll stash away the valid buffer
+ // of data to be used on the first audio callback.
+
+ CHECK(mFirstBuffer == NULL);
+
+ MediaSource::ReadOptions options;
+ if (mSeeking) {
+ options.setSeekTo(mSeekTimeUs);
+ mSeeking = false;
+ }
+
+ mFirstBufferResult = mSource->read(&mFirstBuffer, &options);
+ if (mFirstBufferResult == INFO_FORMAT_CHANGED) {
+ ALOGV("INFO_FORMAT_CHANGED!!!");
+ CHECK(mFirstBuffer == NULL);
+ mFirstBufferResult = OK;
+ mIsFirstBuffer = false;
+ } else {
+ mIsFirstBuffer = true;
+ }
+
+ sp<MetaData> format = mSource->getFormat();
+ const char *mime;
+
+ // LPA path only accepts already-decoded PCM.
+ bool success = format->findCString(kKeyMIMEType, &mime);
+ CHECK(success);
+ CHECK(!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW));
+
+ success = format->findInt32(kKeySampleRate, &mSampleRate);
+ CHECK(success);
+
+ success = format->findInt32(kKeyChannelCount, &numChannels);
+ CHECK(success);
+
+ if(!format->findInt32(kKeyChannelMask, &mChannelMask)) {
+ // log only when there's a risk of ambiguity of channel mask selection
+ ALOGI_IF(numChannels > 2,
+ "source format didn't specify channel mask, using (%d) channel order", numChannels);
+ mChannelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
+ }
+
+ err = setupAudioSink();
+
+ if (err != OK) {
+ // Undo: drop the stashed first buffer and stop the source if we
+ // started it ourselves.
+ if (mFirstBuffer != NULL) {
+ mFirstBuffer->release();
+ mFirstBuffer = NULL;
+ }
+
+ if (!sourceAlreadyStarted) {
+ mSource->stop();
+ }
+
+ ALOGE("Opening a routing session failed");
+ return err;
+ }
+
+ mIsAudioRouted = true;
+ mStarted = true;
+ mAudioSink->start();
+ mTimeStarted = nanoseconds_to_microseconds(systemTime(SYSTEM_TIME_MONOTONIC));
+ ALOGV("Waking up decoder thread");
+ pthread_cond_signal(&decoder_cv);
+
+ return OK;
+}
+
+// Request a seek: clear any EOS state, record the target time, reset the
+// played-time bookkeeping, flush the sink, and wake the decoder thread,
+// which performs the actual seek via ReadOptions.
+status_t LPAPlayer::seekTo(int64_t time_us) {
+ Mutex::Autolock autoLock(mLock);
+ ALOGV("seekTo: time_us %lld", time_us);
+ if ( mReachedEOS ) {
+ mReachedEOS = false;
+ mReachedOutputEOS = false;
+ }
+ mSeeking = true;
+ mSeekTimeUs = time_us;
+ mTimePlayed = time_us;
+ mTimeStarted = 0;
+ ALOGV("In seekTo(), mSeekTimeUs %lld",mSeekTimeUs);
+ mAudioSink->flush();
+ pthread_cond_signal(&decoder_cv);
+ //TODO: Update the mPauseTime
+ return OK;
+}
+
+// Pause playback. On the LPA (non-A2DP) path a delayed pause-timeout
+// event is queued so the output session can be torn down if the pause
+// lasts too long. playPendingSamples selects pause-vs-stop on the sink
+// for the A2DP case. Accumulates elapsed play time into mTimePlayed.
+void LPAPlayer::pause(bool playPendingSamples) {
+ CHECK(mStarted);
+ if (mPaused) {
+ return;
+ }
+ ALOGV("pause: playPendingSamples %d", playPendingSamples);
+ mPaused = true;
+ if (playPendingSamples) {
+ if (!mIsA2DPEnabled) {
+ if (!mPauseEventPending) {
+ ALOGV("Posting an event for Pause timeout");
+ mQueue.postEventWithDelay(mPauseEvent, LPA_PAUSE_TIMEOUT_USEC);
+ mPauseEventPending = true;
+ }
+ if (mAudioSink.get() != NULL)
+ mAudioSink->pause();
+ }
+ else {
+ if (mAudioSink.get() != NULL)
+ mAudioSink->stop();
+
+ }
+ } else {
+ if (!mIsA2DPEnabled) {
+ if(!mPauseEventPending) {
+ ALOGV("Posting an event for Pause timeout");
+ mQueue.postEventWithDelay(mPauseEvent, LPA_PAUSE_TIMEOUT_USEC);
+ mPauseEventPending = true;
+ }
+ if (mAudioSink.get() != NULL)
+ mAudioSink->pause();
+ } else {
+ if (mAudioSink.get() != NULL) {
+ mAudioSink->pause();
+ }
+ }
+ }
+ if(mTimeStarted != 0) {
+ mTimePlayed += (nanoseconds_to_microseconds(systemTime(SYSTEM_TIME_MONOTONIC)) - mTimeStarted);
+ }
+}
+
+// Resume from pause: cancel any pending pause-timeout event, reopen the
+// sink if the timeout already tore it down (setupAudioSink), restart the
+// sink, restamp mTimeStarted, and wake the decoder thread.
+void LPAPlayer::resume() {
+ ALOGV("resume: mPaused %d",mPaused);
+ if ( mPaused) {
+ CHECK(mStarted);
+ if (!mIsA2DPEnabled) {
+ if(mPauseEventPending) {
+ ALOGV("Resume(): Cancelling the puaseTimeout event");
+ mPauseEventPending = false;
+ mQueue.cancelEvent(mPauseEvent->eventID());
+ }
+
+ }
+
+ setupAudioSink();
+
+ mPaused = false;
+ mIsAudioRouted = true;
+ mAudioSink->start();
+ mTimeStarted = nanoseconds_to_microseconds(systemTime(SYSTEM_TIME_MONOTONIC));
+ pthread_cond_signal(&decoder_cv);
+ }
+}
+
+//static
+// Sink callback: an EVENT_UNDERRUN with a NULL buffer marks end of
+// output; flag EOS and notify the observer. Always returns 1.
+size_t LPAPlayer::AudioSinkCallback(
+ MediaPlayerBase::AudioSink *audioSink,
+ void *buffer, size_t size, void *cookie) {
+ if (buffer == NULL && size == AudioTrack::EVENT_UNDERRUN) {
+ LPAPlayer *me = (LPAPlayer *)cookie;
+ me->mReachedEOS = true;
+ me->mReachedOutputEOS = true;
+ ALOGV("postAudioEOS");
+ me->mObserver->postAudioEOS(0);
+ }
+ return 1;
+}
+
+// Full teardown: force-EOS the worker threads and join them, stop/close
+// the sink if routed, release held buffers, stop the source, and wait for
+// the source's refcount to actually drop (see hack comment) before
+// clearing playback state.
+void LPAPlayer::reset() {
+
+ // Close the audiosink after all the threads exited to make sure
+ ALOGV("Reset called!!!!!");
+ mReachedEOS = true;
+ //TODO: Release Wake lock
+
+ // make sure Decoder thread has exited
+ requestAndWaitForDecoderThreadExit();
+ requestAndWaitForA2DPNotificationThreadExit();
+ if (mIsAudioRouted) {
+ mAudioSink->stop();
+ mAudioSink->close();
+ }
+ mAudioSink.clear();
+ // Make sure to release any buffer we hold onto so that the
+ // source is able to stop().
+ if (mFirstBuffer != NULL) {
+ mFirstBuffer->release();
+ mFirstBuffer = NULL;
+ }
+
+ if (mInputBuffer != NULL) {
+ ALOGV("AudioPlayer releasing input buffer.");
+ mInputBuffer->release();
+ mInputBuffer = NULL;
+ }
+
+ mSource->stop();
+
+ // The following hack is necessary to ensure that the OMX
+ // component is completely released by the time we may try
+ // to instantiate it again.
+ wp<MediaSource> tmp = mSource;
+ mSource.clear();
+ while (tmp.promote() != NULL) {
+ usleep(1000);
+ }
+
+ mPositionTimeMediaUs = -1;
+ mPositionTimeRealUs = -1;
+ mSeeking = false;
+ mReachedEOS = false;
+ mReachedOutputEOS = false;
+ mFinalStatus = OK;
+ mStarted = false;
+}
+
+
+bool LPAPlayer::isSeeking() {
+ Mutex::Autolock autoLock(mLock);
+ return mSeeking;
+}
+
+bool LPAPlayer::reachedEOS(status_t *finalStatus) {
+ *finalStatus = OK;
+ Mutex::Autolock autoLock(mLock);
+ *finalStatus = mFinalStatus;
+ return mReachedOutputEOS;
+}
+
+
+void *LPAPlayer::decoderThreadWrapper(void *me) {
+ static_cast<LPAPlayer *>(me)->decoderThreadEntry();
+ return NULL;
+}
+
+
+// Decoder thread main loop. Blocks until start() signals readiness, then
+// repeatedly: (re)configures the sink for the current output path via
+// setupAudioSink(), waits on decoder_cv while there is nothing to do
+// (EOS / paused / sink not routed / A2DP active), and otherwise pulls PCM
+// from the source with fillBuffer() and writes it to the audio sink.
+// Exits when killDecoderThread is set.
+void LPAPlayer::decoderThreadEntry() {
+
+    pthread_mutex_lock(&decoder_mutex);
+
+    pid_t tid = gettid();
+    androidSetThreadPriority(tid, ANDROID_PRIORITY_AUDIO);
+    prctl(PR_SET_NAME, (unsigned long)"LPA DecodeThread", 0, 0, 0);
+
+    ALOGV("decoderThreadEntry wait for signal \n");
+    if (!mStarted) {
+        pthread_cond_wait(&decoder_cv, &decoder_mutex);
+    }
+    ALOGV("decoderThreadEntry ready to work \n");
+    pthread_mutex_unlock(&decoder_mutex);
+    if (killDecoderThread) {
+        return;
+    }
+    void* local_buf = malloc(MEM_BUFFER_SIZE);
+    int bytesWritten = 0;
+    while (!killDecoderThread) {
+
+        setupAudioSink();
+        if (mTimeStarted == 0) {
+            mTimeStarted = nanoseconds_to_microseconds(systemTime(SYSTEM_TIME_MONOTONIC));
+        }
+
+        if (mReachedEOS || mPaused || !mIsAudioRouted || mIsA2DPEnabled) {
+            ALOGD("DecoderThread taking mutex mReachedEOS %d mPaused %d mIsAudioRouted %d mIsA2DPEnabled %d"
+                , mReachedEOS, mPaused, mIsAudioRouted, mIsA2DPEnabled);
+            pthread_mutex_lock(&decoder_mutex);
+            pthread_cond_wait(&decoder_cv, &decoder_mutex);
+            pthread_mutex_unlock(&decoder_mutex);
+            ALOGD("DecoderThread woken up ");
+            continue;
+        }
+
+        if (!mIsA2DPEnabled) {
+            ALOGV("FillBuffer: MemBuffer size %d", MEM_BUFFER_SIZE);
+            ALOGV("Fillbuffer started");
+            //TODO: Add memset
+            bytesWritten = fillBuffer(local_buf, MEM_BUFFER_SIZE);
+            ALOGV("FillBuffer completed bytesToWrite %d", bytesWritten);
+
+            if(!killDecoderThread) {
+                // Blocking write; requestAndWaitForDecoderThreadExit() flushes
+                // the sink to unblock this if the HAL has stalled.
+                mAudioSink->write(local_buf, bytesWritten);
+            }
+        }
+    }
+
+    free(local_buf);
+
+    //TODO: Call fillbuffer with different size and write to mAudioSink()
+}
+
+// Creates the decoder worker thread and initializes the synchronization
+// primitives shared with the control path.
+void LPAPlayer::createThreads() {
+
+    //Initialize all the Mutexes and Condition Variables
+    pthread_mutex_init(&decoder_mutex, NULL);
+    pthread_mutex_init(&audio_sink_setup_mutex, NULL);
+    pthread_cond_init (&decoder_cv, NULL);
+
+    // Create 4 threads Effect, decoder, event and A2dp
+    // NOTE(review): only the decoder thread is actually created in this
+    // variant; a2dpNotificationThreadAlive is never set true here, so the
+    // A2DP-thread join in reset() is a no-op — confirm that is intended.
+    pthread_attr_t attr;
+    pthread_attr_init(&attr);
+    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+
+    killDecoderThread = false;
+    killA2DPNotificationThread = false;
+
+    decoderThreadAlive = true;
+
+    ALOGV("Creating decoder Thread");
+    pthread_create(&decoderThread, &attr, decoderThreadWrapper, this);
+
+    pthread_attr_destroy(&attr);
+}
+
+// Copies up to `size` bytes of decoded PCM from the source into `data`.
+// Handles a pending seek (drops held buffers and forwards the seek time to
+// the next source read), consumes the first buffer stashed by start(), and
+// updates the media/real position bookkeeping. Returns the number of bytes
+// produced; 0 once EOS was already reached.
+size_t LPAPlayer::fillBuffer(void *data, size_t size) {
+
+    if (mReachedEOS) {
+        return 0;
+    }
+
+    bool postSeekComplete = false;
+
+    size_t size_done = 0;
+    size_t size_remaining = size;
+    while (size_remaining > 0) {
+        MediaSource::ReadOptions options;
+
+        {
+            Mutex::Autolock autoLock(mLock);
+
+            // A user seek supersedes any internal (pause-timeout) seek.
+            if (mSeeking) {
+                mInternalSeeking = false;
+            }
+            if (mSeeking || mInternalSeeking) {
+                if (mIsFirstBuffer) {
+                    if (mFirstBuffer != NULL) {
+                        mFirstBuffer->release();
+                        mFirstBuffer = NULL;
+                    }
+                    mIsFirstBuffer = false;
+                }
+
+                options.setSeekTo(mSeekTimeUs);
+
+                if (mInputBuffer != NULL) {
+                    mInputBuffer->release();
+                    mInputBuffer = NULL;
+                }
+
+                // NOTE(review): unlike the ALSA variant of fillBuffer(),
+                // size_done/size_remaining are NOT reset here, so bytes
+                // copied before the seek stay in the output — confirm
+                // this is intended.
+                mSeeking = false;
+                if (mObserver && !mInternalSeeking) {
+                    ALOGV("fillBuffer: Posting audio seek complete event");
+                    postSeekComplete = true;
+                }
+                mInternalSeeking = false;
+            }
+        }
+
+        if (mInputBuffer == NULL) {
+            status_t err;
+
+            if (mIsFirstBuffer) {
+                // Use the buffer pre-fetched in start() exactly once.
+                mInputBuffer = mFirstBuffer;
+                mFirstBuffer = NULL;
+                err = mFirstBufferResult;
+
+                mIsFirstBuffer = false;
+            } else {
+                err = mSource->read(&mInputBuffer, &options);
+            }
+
+            CHECK((err == OK && mInputBuffer != NULL)
+                  || (err != OK && mInputBuffer == NULL));
+
+            Mutex::Autolock autoLock(mLock);
+
+            if (err != OK) {
+                // Any read failure (including ERROR_END_OF_STREAM) ends the
+                // stream; the status is surfaced via reachedEOS().
+                mReachedEOS = true;
+                mFinalStatus = err;
+                break;
+            }
+
+            CHECK(mInputBuffer->meta_data()->findInt64(
+                kKeyTime, &mPositionTimeMediaUs));
+            mFrameSize = mAudioSink->frameSize();
+            mPositionTimeRealUs =
+                ((mNumFramesPlayed + size_done / mFrameSize) * 1000000)
+                / mSampleRate;
+        }
+        if (mInputBuffer->range_length() == 0) {
+            mInputBuffer->release();
+            mInputBuffer = NULL;
+
+            continue;
+        }
+
+        size_t copy = size_remaining;
+        if (copy > mInputBuffer->range_length()) {
+            copy = mInputBuffer->range_length();
+        }
+
+        memcpy((char *)data + size_done,
+               (const char *)mInputBuffer->data() + mInputBuffer->range_offset(),
+               copy);
+
+        mInputBuffer->set_range(mInputBuffer->range_offset() + copy,
+                                mInputBuffer->range_length() - copy);
+
+        size_done += copy;
+        size_remaining -= copy;
+    }
+
+    // Posted outside mLock to avoid re-entrancy into the observer under lock.
+    if (postSeekComplete) {
+        mObserver->postAudioSeekComplete();
+    }
+
+    return size_done;
+}
+
+// Lock-protected wrapper around getRealTimeUsLocked().
+int64_t LPAPlayer::getRealTimeUs() {
+    Mutex::Autolock autoLock(mLock);
+    return getRealTimeUsLocked();
+}
+
+
+int64_t LPAPlayer::getRealTimeUsLocked(){
+    //Used for AV sync: irrelevant API for LPA.
+    return 0;
+}
+
+
+// Current media position in microseconds: accumulated play time plus, while
+// actively playing, the wall-clock time elapsed since the last start/resume.
+int64_t LPAPlayer::getMediaTimeUs() {
+    Mutex::Autolock autoLock(mLock);
+    ALOGV("getMediaTimeUs() mPaused %d mSeekTimeUs %lld", mPaused, mSeekTimeUs);
+    if(mPaused || mTimeStarted == 0) {
+        return mTimePlayed;
+    } else {
+        return nanoseconds_to_microseconds(systemTime(SYSTEM_TIME_MONOTONIC)) - mTimeStarted + mTimePlayed;
+    }
+}
+
+// Exposes the last real/media position pair recorded by fillBuffer();
+// returns false until both values have been set (-1 means "unset").
+bool LPAPlayer::getMediaTimeMapping(
+    int64_t *realtime_us, int64_t *mediatime_us) {
+    Mutex::Autolock autoLock(mLock);
+
+    *realtime_us = mPositionTimeRealUs;
+    *mediatime_us = mPositionTimeMediaUs;
+
+    return mPositionTimeRealUs != -1 && mPositionTimeMediaUs != -1;
+}
+
+// Asks the decoder thread to exit and joins it. The sink flush unblocks a
+// write() that may be stalled inside the audio HAL.
+void LPAPlayer::requestAndWaitForDecoderThreadExit() {
+
+    if (!decoderThreadAlive)
+        return;
+    killDecoderThread = true;
+    if (mIsAudioRouted)
+        mAudioSink->flush();
+    pthread_cond_signal(&decoder_cv);
+    pthread_join(decoderThread,NULL);
+    ALOGV("decoder thread killed");
+
+}
+
+// Asks the A2DP notification thread to exit and joins it; a no-op when the
+// thread was never started.
+void LPAPlayer::requestAndWaitForA2DPNotificationThreadExit() {
+    if (!a2dpNotificationThreadAlive)
+        return;
+    killA2DPNotificationThread = true;
+    pthread_cond_signal(&a2dp_notification_cv);
+    pthread_join(A2DPNotificationThread,NULL);
+    ALOGV("a2dp notification thread killed");
+}
+
+// Fired LPA_PAUSE_TIMEOUT_USEC after pause(): tears down the output session
+// (closes the sink) to save power. The resume position is preserved by
+// copying the accumulated play time into mSeekTimeUs; resume()/the decoder
+// loop will reopen the sink via setupAudioSink().
+void LPAPlayer::onPauseTimeOut() {
+    ALOGV("onPauseTimeOut");
+    if (!mPauseEventPending) {
+        return;
+    }
+    mPauseEventPending = false;
+    if(!mIsA2DPEnabled) {
+        // 1.) Set seek flags
+        mReachedEOS = false;
+        mReachedOutputEOS = false;
+        mSeekTimeUs = mTimePlayed;
+
+        // 2.) Close routing Session
+        mAudioSink->close();
+        mIsAudioRouted = false;
+        mTrackType = TRACK_NONE;
+    }
+}
+
+// (Re)opens the audio sink to match the current output path, serialized by
+// audio_sink_setup_mutex:
+//  - A2DP enabled:  regular (mixed) PCM track, data pulled by the sink via
+//                   AudioCallback;
+//  - A2DP disabled: direct LPA track, data pushed by the decoder thread,
+//                   with AudioSinkCallback only reporting underrun/EOS.
+// Returns the status of the sink open (NO_ERROR when no reopen was needed).
+status_t LPAPlayer::setupAudioSink()
+{
+    status_t err = NO_ERROR;
+
+    ALOGD("setupAudioSink with A2DP(%d) tracktype(%d)", mIsA2DPEnabled, mTrackType);
+    pthread_mutex_lock(&audio_sink_setup_mutex);
+
+    if(true == mIsA2DPEnabled) {
+        ALOGE("setupAudioSink:dIRECT track --> rEGULAR track");
+
+        if(mTrackType == TRACK_REGULAR) {
+            ALOGD("setupAudioSink:rEGULAR Track already opened");
+            pthread_mutex_unlock(&audio_sink_setup_mutex);
+            return err;
+        }
+
+        if(mTrackType == TRACK_DIRECT) {
+            ALOGD("setupAudioSink:Close dIRECT track");
+            mAudioSink->stop();
+            mAudioSink->close();
+        }
+
+        ALOGD("setupAudioSink:Open rEGULAR track");
+
+        ALOGD("setupAudioSink:mAudiosink->open() mSampleRate %d, numChannels %d, mChannelMask %d, flags %d",
+              mSampleRate, numChannels, mChannelMask, 0);
+
+        err = mAudioSink->open(
+            mSampleRate, numChannels, mChannelMask, AUDIO_FORMAT_PCM_16_BIT,
+            DEFAULT_AUDIOSINK_BUFFERCOUNT,
+            &LPAPlayer::AudioCallback,
+            this,
+            (audio_output_flags_t)0);
+        if (err != NO_ERROR){
+            // NOTE(review): error is logged but the code still start()s the
+            // sink and marks TRACK_REGULAR below — confirm intended.
+            ALOGE("setupAudioSink:Audio sink open failed.");
+        }
+
+        ALOGD("setupAudioSink:Start rEGULAR track");
+        mAudioSink->start();
+
+        ALOGD("setupAudioSink:rEGULAR track opened");
+        mTrackType = TRACK_REGULAR;
+
+    } else if (false == mIsA2DPEnabled){
+        ALOGE("setupAudioSink:rEGULAR track --> dIRECT track");
+
+        if(mTrackType == TRACK_DIRECT) {
+            ALOGD("setupAudioSink:Direct Track already opened");
+            pthread_mutex_unlock(&audio_sink_setup_mutex);
+            return err;
+        }
+
+        if(mTrackType == TRACK_REGULAR) {
+            ALOGD("setupAudioSink:Close rEGULAR track");
+            mAudioSink->stop();
+            mAudioSink->close();
+        }
+
+        ALOGD("setupAudioSink:Open dIRECT track");
+
+        ALOGD("setupAudioSink:mAudiosink->open() mSampleRate %d, numChannels %d, mChannelMask %d, flags %d",
+              mSampleRate, numChannels, mChannelMask, (AUDIO_OUTPUT_FLAG_LPA | AUDIO_OUTPUT_FLAG_DIRECT));
+
+        err = mAudioSink->open(
+            mSampleRate, numChannels, mChannelMask, AUDIO_FORMAT_PCM_16_BIT,
+            DEFAULT_AUDIOSINK_BUFFERCOUNT,
+            &LPAPlayer::AudioSinkCallback,
+            this,
+            (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_LPA | AUDIO_OUTPUT_FLAG_DIRECT));
+        if (err != NO_ERROR){
+            ALOGE("setupAudioSink:Audio sink open failed.");
+        }
+
+        // The direct track is opened but not start()ed here; resume()/start()
+        // call mAudioSink->start() themselves.
+        mTrackType = TRACK_DIRECT;
+        ALOGD("setupAudioSink:dIRECT track opened");
+    }
+    pthread_mutex_unlock(&audio_sink_setup_mutex);
+
+    return err;
+
+}
+
+
+// static trampoline: forwards the sink's data request to the member
+// AudioCallback() on the LPAPlayer instance stored in `cookie`.
+size_t LPAPlayer::AudioCallback(
+    MediaPlayerBase::AudioSink *audioSink,
+    void *buffer, size_t size, void *cookie) {
+
+    return (static_cast<LPAPlayer *>(cookie)->AudioCallback(cookie, buffer, size));
+}
+
+// Pull-mode fill for the regular (A2DP) track: copies decoded data into the
+// sink's buffer via fillBuffer() and posts EOS once the source is drained.
+// NOTE(review): this runs as a member function with this == cookie, so the
+// unqualified fillBuffer()/mReachedEOS and the me-> accesses hit the same
+// object; the `me` indirection is redundant but harmless.
+size_t LPAPlayer::AudioCallback(void *cookie, void *buffer, size_t size) {
+    size_t size_done = 0;
+    uint32_t numFramesPlayedOut;
+    LPAPlayer *me = (LPAPlayer *)cookie;
+
+    if(me->mReachedOutputEOS)
+        return 0;
+
+    if (buffer == NULL && size == AudioTrack::EVENT_UNDERRUN) {
+        ALOGE("Underrun");
+        return 0;
+    } else {
+        size_done = fillBuffer(buffer, size);
+        ALOGD("RegularTrack:fillbuffersize %d %d", size_done, size);
+        if(mReachedEOS) {
+            me->mReachedOutputEOS = true;
+            me->mObserver->postAudioEOS();
+            ALOGE("postEOSDelayUs ");
+        }
+        return size_done;
+    }
+}
+
+} //namespace android
diff --git a/media/libstagefright/LPAPlayerALSA.cpp b/media/libstagefright/LPAPlayerALSA.cpp
new file mode 100644
index 0000000..38d6bac
--- /dev/null
+++ b/media/libstagefright/LPAPlayerALSA.cpp
@@ -0,0 +1,793 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ * Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
+ * Not a Contribution, Apache license notifications and license are retained
+ * for attribution purposes only.
+ *
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_NDDEBUG 0
+//#define LOG_NDEBUG 0
+#define LOG_TAG "LPAPlayerALSA"
+
+#include <utils/Log.h>
+#include <utils/threads.h>
+
+#include <signal.h>
+#include <sys/prctl.h>
+#include <sys/resource.h>
+#include <sys/poll.h>
+#include <sys/eventfd.h>
+#include <binder/IPCThreadState.h>
+#include <media/AudioTrack.h>
+
+#include <media/stagefright/LPAPlayer.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/MediaErrors.h>
+
+#include <hardware_legacy/power.h>
+
+#include <linux/unistd.h>
+
+#include "include/AwesomePlayer.h"
+#include <powermanager/PowerManager.h>
+
+static const char mName[] = "LPAPlayer";
+
+#define MEM_BUFFER_SIZE 262144
+#define MEM_BUFFER_COUNT 4
+
+#define PCM_FORMAT 2
+#define NUM_FDS 2
+namespace android {
+// Count of live LPAPlayer instances (diagnostic).
+int LPAPlayer::objectsAlive = 0;
+
+// Constructor: wires the player to the given sink and observer, initializes
+// the playback/pause/A2DP state, starts the timed-event queue that drives
+// the pause timeout, and caches an IAudioFlinger handle. initCheck is always
+// set true on return.
+LPAPlayer::LPAPlayer(
+    const sp<MediaPlayerBase::AudioSink> &audioSink, bool &initCheck,
+    AwesomePlayer *observer)
+:AudioPlayer(audioSink,observer),
+mPositionTimeMediaUs(-1),
+mPositionTimeRealUs(-1),
+mInternalSeeking(false),
+mStarted(false),
+mA2DPEnabled(false),
+mSampleRate(0),
+mLatencyUs(0),
+mFrameSize(0),
+mNumFramesPlayed(0),
+mNumFramesPlayedSysTimeUs(0),
+mInputBuffer(NULL),
+mSeeking(false),
+mReachedEOS(false),
+mReachedOutputEOS(false),
+mFinalStatus(OK),
+mSeekTimeUs(0),
+mPauseTime(0),
+mIsFirstBuffer(false),
+mFirstBufferResult(OK),
+mFirstBuffer(NULL),
+mAudioSink(audioSink),
+mObserver(observer) {
+    ALOGV("LPAPlayer::LPAPlayer() ctor");
+    objectsAlive++;
+    numChannels =0;
+    mPaused = false;
+    mIsA2DPEnabled = false;
+    mAudioFlinger = NULL;
+    AudioFlingerClient = NULL;
+    /* Initialize Suspend/Resume related variables */
+    mQueue.start();
+    mQueueStarted = true;
+    mPauseEvent = new TimedEvent(this, &LPAPlayer::onPauseTimeOut);
+    mPauseEventPending = false;
+    getAudioFlinger();
+    ALOGV("Registering client with AudioFlinger");
+    // NOTE(review): registration is disabled, so the client only receives
+    // binder-death notifications (ioConfigChanged is also stubbed out).
+    //mAudioFlinger->registerClient(AudioFlingerClient);
+
+    mIsAudioRouted = false;
+
+    initCheck = true;
+
+}
+
+// Destructor: stops the event queue and runs the full reset() teardown.
+LPAPlayer::~LPAPlayer() {
+    ALOGV("LPAPlayer::~LPAPlayer()");
+    if (mQueueStarted) {
+        mQueue.stop();
+    }
+
+    reset();
+
+    //mAudioFlinger->deregisterClient(AudioFlingerClient);
+    objectsAlive--;
+}
+
+// Obtains (and caches) the IAudioFlinger binder interface, blocking in
+// 0.5 s retries until the service is published, and links the local client
+// for death notification.
+void LPAPlayer::getAudioFlinger() {
+    Mutex::Autolock _l(AudioFlingerLock);
+
+    if ( mAudioFlinger.get() == 0 ) {
+        sp<IServiceManager> sm = defaultServiceManager();
+        sp<IBinder> binder;
+        do {
+            binder = sm->getService(String16("media.audio_flinger"));
+            if ( binder != 0 )
+                break;
+            ALOGW("AudioFlinger not published, waiting...");
+            usleep(500000); // 0.5 s
+        } while ( true );
+        if ( AudioFlingerClient == NULL ) {
+            AudioFlingerClient = new AudioFlingerLPAdecodeClient(this);
+        }
+
+        binder->linkToDeath(AudioFlingerClient);
+        mAudioFlinger = interface_cast<IAudioFlinger>(binder);
+    }
+    ALOGE_IF(mAudioFlinger==0, "no AudioFlinger!?");
+}
+
+// Client object registered with AudioFlinger; keeps a raw back-pointer to
+// the owning player.
+LPAPlayer::AudioFlingerLPAdecodeClient::AudioFlingerLPAdecodeClient(void *obj)
+{
+    ALOGV("LPAPlayer::AudioFlingerLPAdecodeClient::AudioFlingerLPAdecodeClient");
+    pBaseClass = (LPAPlayer*)obj;
+}
+
+// Binder-death handler: drops the cached AudioFlinger interface so it will
+// be re-fetched on next use.
+void LPAPlayer::AudioFlingerLPAdecodeClient::binderDied(const wp<IBinder>& who) {
+    Mutex::Autolock _l(pBaseClass->AudioFlingerLock);
+
+    pBaseClass->mAudioFlinger.clear();
+    ALOGW("AudioFlinger server died!");
+}
+
+// A2DP output-state change hook. The entire handler body is commented out,
+// so this is currently a logging-only stub.
+void LPAPlayer::AudioFlingerLPAdecodeClient::ioConfigChanged(int event, audio_io_handle_t ioHandle, const void *param2) {
+    ALOGV("ioConfigChanged() event %d", event);
+    /*
+    if ( event != AudioSystem::A2DP_OUTPUT_STATE &&
+         event != AudioSystem::EFFECT_CONFIG_CHANGED) {
+        return;
+    }
+
+    switch ( event ) {
+    case AudioSystem::A2DP_OUTPUT_STATE:
+        {
+            ALOGV("ioConfigChanged() A2DP_OUTPUT_STATE iohandle is %d with A2DPEnabled in %d", ioHandle, pBaseClass->mIsA2DPEnabled);
+            if ( -1 == ioHandle ) {
+                if ( pBaseClass->mIsA2DPEnabled ) {
+                    pBaseClass->mIsA2DPEnabled = false;
+                    if (pBaseClass->mStarted) {
+                        pBaseClass->handleA2DPSwitch();
+                    }
+                    ALOGV("ioConfigChanged:: A2DP Disabled");
+                }
+            } else {
+                if ( !pBaseClass->mIsA2DPEnabled ) {
+
+                    pBaseClass->mIsA2DPEnabled = true;
+                    if (pBaseClass->mStarted) {
+                        pBaseClass->handleA2DPSwitch();
+                    }
+
+                    ALOGV("ioConfigChanged:: A2DP Enabled");
+                }
+            }
+        }
+        break;
+    }
+    ALOGV("ioConfigChanged Out");
+    */
+}
+
+// Stub: A2DP switch handling is not implemented in this variant.
+void LPAPlayer::handleA2DPSwitch() {
+    //TODO: Implement
+}
+
+// Sets the (decoded PCM) media source; may only be called once.
+void LPAPlayer::setSource(const sp<MediaSource> &source) {
+    CHECK(mSource == NULL);
+    ALOGV("Setting source from LPA Player");
+    mSource = source;
+}
+
+// Begins playback: starts the source (unless the caller already did),
+// creates the worker threads, pre-fetches the first buffer to absorb an
+// optional INFO_FORMAT_CHANGED, validates the decoded (raw PCM) format,
+// opens the audio sink (direct LPA track unless A2DP is up), and wakes the
+// decoder thread. Returns OK or the first error encountered.
+status_t LPAPlayer::start(bool sourceAlreadyStarted) {
+    CHECK(!mStarted);
+    CHECK(mSource != NULL);
+
+    ALOGV("start: sourceAlreadyStarted %d", sourceAlreadyStarted);
+    //Check if the source is started, start it
+    status_t err;
+    if (!sourceAlreadyStarted) {
+        err = mSource->start();
+
+        if (err != OK) {
+            return err;
+        }
+    }
+
+    //Create decoder and a2dp notification thread and initialize all the
+    //mutexes and conditional variables
+    createThreads();
+    ALOGV("All Threads Created.");
+
+    // We allow an optional INFO_FORMAT_CHANGED at the very beginning
+    // of playback, if there is one, getFormat below will retrieve the
+    // updated format, if there isn't, we'll stash away the valid buffer
+    // of data to be used on the first audio callback.
+
+    CHECK(mFirstBuffer == NULL);
+
+    MediaSource::ReadOptions options;
+    if (mSeeking) {
+        options.setSeekTo(mSeekTimeUs);
+        mSeeking = false;
+    }
+
+    mFirstBufferResult = mSource->read(&mFirstBuffer, &options);
+    if (mFirstBufferResult == INFO_FORMAT_CHANGED) {
+        ALOGV("INFO_FORMAT_CHANGED!!!");
+        CHECK(mFirstBuffer == NULL);
+        mFirstBufferResult = OK;
+        mIsFirstBuffer = false;
+    } else {
+        mIsFirstBuffer = true;
+    }
+
+    sp<MetaData> format = mSource->getFormat();
+    const char *mime;
+
+    bool success = format->findCString(kKeyMIMEType, &mime);
+    CHECK(success);
+    // LPA path only accepts already-decoded PCM.
+    CHECK(!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW));
+
+    success = format->findInt32(kKeySampleRate, &mSampleRate);
+    CHECK(success);
+
+    success = format->findInt32(kKeyChannelCount, &numChannels);
+    CHECK(success);
+
+    if(!format->findInt32(kKeyChannelMask, &mChannelMask)) {
+        // log only when there's a risk of ambiguity of channel mask selection
+        ALOGI_IF(numChannels > 2,
+                 "source format didn't specify channel mask, using (%d) channel order", numChannels);
+        mChannelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
+    }
+    audio_output_flags_t flags = (audio_output_flags_t) (AUDIO_OUTPUT_FLAG_LPA |
+                                                         AUDIO_OUTPUT_FLAG_DIRECT);
+    ALOGV("mAudiosink->open() mSampleRate %d, numChannels %d, mChannelMask %d, flags %d",mSampleRate, numChannels, mChannelMask, flags);
+    err = mAudioSink->open(
+        mSampleRate, numChannels, mChannelMask, AUDIO_FORMAT_PCM_16_BIT,
+        DEFAULT_AUDIOSINK_BUFFERCOUNT,
+        &LPAPlayer::AudioSinkCallback,
+        this,
+        (mA2DPEnabled ? AUDIO_OUTPUT_FLAG_NONE : flags));
+
+    if (err != OK) {
+        // Undo the pre-fetch and (if we started it) the source.
+        if (mFirstBuffer != NULL) {
+            mFirstBuffer->release();
+            mFirstBuffer = NULL;
+        }
+
+        if (!sourceAlreadyStarted) {
+            mSource->stop();
+        }
+
+        ALOGE("Opening a routing session failed");
+        return err;
+    }
+
+    mIsAudioRouted = true;
+    mStarted = true;
+    mAudioSink->start();
+    ALOGV("Waking up decoder thread");
+    pthread_cond_signal(&decoder_cv);
+
+    return OK;
+}
+
+// Records a seek request; the decoder thread's next fillBuffer() applies it.
+// Clears a previous EOS so playback can restart after seeking back, flushes
+// the sink and wakes the decoder thread.
+status_t LPAPlayer::seekTo(int64_t time_us) {
+    Mutex::Autolock autoLock(mLock);
+    ALOGV("seekTo: time_us %lld", time_us);
+    if ( mReachedEOS ) {
+        mReachedEOS = false;
+        mReachedOutputEOS = false;
+    }
+    mSeeking = true;
+    mSeekTimeUs = time_us;
+    mPauseTime = mSeekTimeUs;
+    ALOGV("In seekTo(), mSeekTimeUs %lld",mSeekTimeUs);
+    mAudioSink->flush();
+    pthread_cond_signal(&decoder_cv);
+    return OK;
+}
+
+// Pauses playback. On the non-A2DP (direct) path it also arms a delayed
+// pause-timeout event that will tear down the output session to save power.
+// mPauseTime captures the position (seek base + sink timestamp) so
+// getMediaTimeUs() stays correct while paused. playPendingSamples only
+// affects which branch computes mPauseTime; both branches pause the sink.
+void LPAPlayer::pause(bool playPendingSamples) {
+    CHECK(mStarted);
+    if (mPaused) {
+        return;
+    }
+    ALOGV("pause: playPendingSamples %d", playPendingSamples);
+    mPaused = true;
+    // NOTE(review): `state` is declared but never used in this function.
+    A2DPState state;
+    if (playPendingSamples) {
+        if (!mIsA2DPEnabled) {
+            if (!mPauseEventPending) {
+                ALOGV("Posting an event for Pause timeout");
+                mQueue.postEventWithDelay(mPauseEvent, LPA_PAUSE_TIMEOUT_USEC);
+                mPauseEventPending = true;
+            }
+            mPauseTime = mSeekTimeUs + getTimeStamp(A2DP_DISABLED);
+        }
+        else {
+            mPauseTime = mSeekTimeUs + getTimeStamp(A2DP_ENABLED);
+        }
+        if (mAudioSink.get() != NULL)
+            mAudioSink->pause();
+    } else {
+        if (!mIsA2DPEnabled) {
+            if(!mPauseEventPending) {
+                ALOGV("Posting an event for Pause timeout");
+                mQueue.postEventWithDelay(mPauseEvent, LPA_PAUSE_TIMEOUT_USEC);
+                mPauseEventPending = true;
+            }
+            mPauseTime = mSeekTimeUs + getTimeStamp(A2DP_DISABLED);
+        } else {
+            mPauseTime = mSeekTimeUs + getTimeStamp(A2DP_ENABLED);
+        }
+        if (mAudioSink.get() != NULL) {
+            ALOGV("AudioSink pause");
+            mAudioSink->pause();
+        }
+    }
+}
+
+// Resumes playback. Serialized against onPauseTimeOut() via mResumeLock:
+// cancels a pending pause timeout and, if the timeout already closed the
+// sink, reopens the direct LPA track before restarting and waking the
+// decoder thread.
+void LPAPlayer::resume() {
+    ALOGV("resume: mPaused %d",mPaused);
+    Mutex::Autolock autoLock(mResumeLock);
+    if ( mPaused) {
+        CHECK(mStarted);
+        if (!mIsA2DPEnabled) {
+            if(mPauseEventPending) {
+                ALOGV("Resume(): Cancelling the puaseTimeout event");
+                mPauseEventPending = false;
+                mQueue.cancelEvent(mPauseEvent->eventID());
+            }
+
+        }
+
+        if (!mIsAudioRouted) {
+            audio_output_flags_t flags = (audio_output_flags_t) (AUDIO_OUTPUT_FLAG_LPA |
+                                                                 AUDIO_OUTPUT_FLAG_DIRECT);
+            status_t err = mAudioSink->open(
+                mSampleRate, numChannels, mChannelMask, AUDIO_FORMAT_PCM_16_BIT,
+                DEFAULT_AUDIOSINK_BUFFERCOUNT,
+                &LPAPlayer::AudioSinkCallback,
+                this,
+                (mA2DPEnabled ? AUDIO_OUTPUT_FLAG_NONE : flags ));
+            if (err != NO_ERROR) {
+                // NOTE(review): error is logged but mIsAudioRouted is still
+                // set and the sink is start()ed below — confirm intended.
+                ALOGE("Audio sink open failed.");
+            }
+            mIsAudioRouted = true;
+        }
+        mPaused = false;
+        mAudioSink->start();
+        pthread_cond_signal(&decoder_cv);
+    }
+}
+
+//static
+// Sink event callback: an underrun (buffer == NULL, size == EVENT_UNDERRUN)
+// is treated as end-of-stream and forwarded to the observer. Returns 1.
+size_t LPAPlayer::AudioSinkCallback(
+    MediaPlayerBase::AudioSink *audioSink,
+    void *buffer, size_t size, void *cookie) {
+    if (buffer == NULL && size == AudioTrack::EVENT_UNDERRUN) {
+        LPAPlayer *me = (LPAPlayer *)cookie;
+        me->mReachedEOS = true;
+        me->mReachedOutputEOS = true;
+        ALOGV("postAudioEOS");
+        me->mObserver->postAudioEOS(0);
+    }
+    return 1;
+}
+
+// Full teardown: force EOS so fillBuffer() stops producing, join both worker
+// threads, close the audio sink, release held media buffers, stop the source
+// and wait until it is fully released, then clear playback state.
+void LPAPlayer::reset() {
+
+    ALOGV("Reset");
+    // Close the audiosink after all the threads exited to make sure
+    mReachedEOS = true;
+
+    // make sure Decoder thread has exited
+    ALOGV("Closing all the threads");
+    requestAndWaitForDecoderThreadExit();
+    requestAndWaitForA2DPNotificationThreadExit();
+
+    ALOGV("Close the Sink");
+    if (mIsAudioRouted) {
+        mAudioSink->stop();
+        mAudioSink->close();
+        mAudioSink.clear();
+    }
+    // Make sure to release any buffer we hold onto so that the
+    // source is able to stop().
+    if (mFirstBuffer != NULL) {
+        mFirstBuffer->release();
+        mFirstBuffer = NULL;
+    }
+
+    if (mInputBuffer != NULL) {
+        ALOGV("AudioPlayer releasing input buffer.");
+        mInputBuffer->release();
+        mInputBuffer = NULL;
+    }
+
+    mSource->stop();
+
+    // The following hack is necessary to ensure that the OMX
+    // component is completely released by the time we may try
+    // to instantiate it again.
+    wp<MediaSource> tmp = mSource;
+    mSource.clear();
+    while (tmp.promote() != NULL) {
+        usleep(1000);
+    }
+
+    mPositionTimeMediaUs = -1;
+    mPositionTimeRealUs = -1;
+    mSeeking = false;
+    mReachedEOS = false;
+    mReachedOutputEOS = false;
+    mFinalStatus = OK;
+    mStarted = false;
+}
+
+
+// Returns whether a seek is currently pending (lock-protected).
+bool LPAPlayer::isSeeking() {
+    Mutex::Autolock autoLock(mLock);
+    return mSeeking;
+}
+
+// Reports whether output EOS was reached; *finalStatus receives the status
+// recorded when the source stopped delivering data (OK on clean EOS).
+bool LPAPlayer::reachedEOS(status_t *finalStatus) {
+    *finalStatus = OK;
+    Mutex::Autolock autoLock(mLock);
+    *finalStatus = mFinalStatus;
+    return mReachedOutputEOS;
+}
+
+
+// pthread trampoline into decoderThreadEntry().
+void *LPAPlayer::decoderThreadWrapper(void *me) {
+    static_cast<LPAPlayer *>(me)->decoderThreadEntry();
+    return NULL;
+}
+
+
+// Decoder thread main loop (ALSA variant): waits for start()'s signal, then
+// repeatedly sleeps on decoder_cv while idle (EOS/paused/unrouted) and
+// otherwise pushes fillBuffer() output into the audio sink until
+// killDecoderThread is set.
+void LPAPlayer::decoderThreadEntry() {
+
+    pthread_mutex_lock(&decoder_mutex);
+
+    pid_t tid = gettid();
+    androidSetThreadPriority(tid, ANDROID_PRIORITY_AUDIO);
+    prctl(PR_SET_NAME, (unsigned long)"LPA DecodeThread", 0, 0, 0);
+
+    ALOGV("decoderThreadEntry wait for signal \n");
+    if (!mStarted) {
+        pthread_cond_wait(&decoder_cv, &decoder_mutex);
+    }
+    ALOGV("decoderThreadEntry ready to work \n");
+    pthread_mutex_unlock(&decoder_mutex);
+    if (killDecoderThread) {
+        return;
+    }
+    void* local_buf = malloc(MEM_BUFFER_SIZE);
+    int bytesWritten = 0;
+    while (!killDecoderThread) {
+
+        if (mReachedEOS || mPaused || !mIsAudioRouted) {
+            pthread_mutex_lock(&decoder_mutex);
+            pthread_cond_wait(&decoder_cv, &decoder_mutex);
+            pthread_mutex_unlock(&decoder_mutex);
+            continue;
+        }
+
+        if (!mIsA2DPEnabled) {
+            ALOGV("FillBuffer: MemBuffer size %d", MEM_BUFFER_SIZE);
+            ALOGV("Fillbuffer started");
+            //TODO: Add memset
+            bytesWritten = fillBuffer(local_buf, MEM_BUFFER_SIZE);
+            ALOGV("FillBuffer completed bytesToWrite %d", bytesWritten);
+
+            if(!killDecoderThread) {
+                mAudioSink->write(local_buf, bytesWritten);
+            }
+        }
+    }
+
+    free(local_buf);
+
+    //TODO: Call fillbuffer with different size and write to mAudioSink()
+}
+
+// pthread trampoline into A2DPNotificationThreadEntry().
+void *LPAPlayer::A2DPNotificationThreadWrapper(void *me) {
+    static_cast<LPAPlayer *>(me)->A2DPNotificationThreadEntry();
+    return NULL;
+}
+
+// A2DP notification thread: sleeps on a2dp_notification_cv and, when woken,
+// either exits (killA2DPNotificationThread) or would react to an A2DP state
+// change — both reaction branches are currently TODO stubs.
+void LPAPlayer::A2DPNotificationThreadEntry() {
+    while (1) {
+        pthread_mutex_lock(&a2dp_notification_mutex);
+        pthread_cond_wait(&a2dp_notification_cv, &a2dp_notification_mutex);
+        pthread_mutex_unlock(&a2dp_notification_mutex);
+        if (killA2DPNotificationThread) {
+            break;
+        }
+
+        ALOGV("A2DP notification has come mIsA2DPEnabled: %d", mIsA2DPEnabled);
+
+        if (mIsA2DPEnabled) {
+            //TODO:
+        }
+        else {
+            //TODO
+        }
+    }
+    a2dpNotificationThreadAlive = false;
+    ALOGV("A2DPNotificationThread is dying");
+
+}
+
+// Creates the decoder and A2DP-notification threads plus the mutexes and
+// condition variables they share with the control path.
+void LPAPlayer::createThreads() {
+
+    //Initialize all the Mutexes and Condition Variables
+    pthread_mutex_init(&decoder_mutex, NULL);
+    pthread_mutex_init(&a2dp_notification_mutex, NULL);
+    pthread_cond_init (&decoder_cv, NULL);
+    pthread_cond_init (&a2dp_notification_cv, NULL);
+
+    // Create 4 threads Effect, decoder, event and A2dp
+    pthread_attr_t attr;
+    pthread_attr_init(&attr);
+    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+
+    killDecoderThread = false;
+    killA2DPNotificationThread = false;
+
+    decoderThreadAlive = true;
+    a2dpNotificationThreadAlive = true;
+
+    ALOGV("Creating decoder Thread");
+    pthread_create(&decoderThread, &attr, decoderThreadWrapper, this);
+
+    ALOGV("Creating A2DP Notification Thread");
+    pthread_create(&A2DPNotificationThread, &attr, A2DPNotificationThreadWrapper, this);
+
+    pthread_attr_destroy(&attr);
+}
+
+// Copies up to `size` bytes of decoded PCM from the source into `data`.
+// Applies a pending (user or internal) seek — releasing held buffers,
+// forwarding the seek time to the next read, and restarting the output
+// buffer from scratch — consumes the first buffer stashed by start(), and
+// maintains the media/real position bookkeeping. Returns the number of
+// bytes produced; 0 once EOS was already reached.
+size_t LPAPlayer::fillBuffer(void *data, size_t size) {
+
+    if (mReachedEOS) {
+        return 0;
+    }
+
+    bool postSeekComplete = false;
+
+    size_t size_done = 0;
+    size_t size_remaining = size;
+    while (size_remaining > 0) {
+        MediaSource::ReadOptions options;
+
+        {
+            Mutex::Autolock autoLock(mLock);
+
+            // A user seek supersedes any internal (pause-timeout) seek.
+            if (mSeeking) {
+                mInternalSeeking = false;
+            }
+            if (mSeeking || mInternalSeeking) {
+                if (mIsFirstBuffer) {
+                    if (mFirstBuffer != NULL) {
+                        mFirstBuffer->release();
+                        mFirstBuffer = NULL;
+                    }
+                    mIsFirstBuffer = false;
+                }
+
+                options.setSeekTo(mSeekTimeUs);
+
+                if (mInputBuffer != NULL) {
+                    mInputBuffer->release();
+                    mInputBuffer = NULL;
+                }
+
+                // Discard anything copied before the seek point.
+                size_remaining = size;
+                size_done = 0;
+
+                mSeeking = false;
+                if (mObserver && !mInternalSeeking) {
+                    ALOGV("fillBuffer: Posting audio seek complete event");
+                    postSeekComplete = true;
+                }
+                mInternalSeeking = false;
+            }
+        }
+
+        if (mInputBuffer == NULL) {
+            status_t err;
+
+            if (mIsFirstBuffer) {
+                // Use the buffer pre-fetched in start() exactly once.
+                mInputBuffer = mFirstBuffer;
+                mFirstBuffer = NULL;
+                err = mFirstBufferResult;
+
+                mIsFirstBuffer = false;
+            } else {
+                err = mSource->read(&mInputBuffer, &options);
+            }
+
+            CHECK((err == OK && mInputBuffer != NULL)
+                  || (err != OK && mInputBuffer == NULL));
+
+            Mutex::Autolock autoLock(mLock);
+
+            if (err != OK) {
+                // Any read failure (including ERROR_END_OF_STREAM) ends the
+                // stream; the status is surfaced via reachedEOS().
+                mReachedEOS = true;
+                mFinalStatus = err;
+                break;
+            }
+
+            CHECK(mInputBuffer->meta_data()->findInt64(
+                kKeyTime, &mPositionTimeMediaUs));
+            mFrameSize = mAudioSink->frameSize();
+            mPositionTimeRealUs =
+                ((mNumFramesPlayed + size_done / mFrameSize) * 1000000)
+                / mSampleRate;
+        }
+        if (mInputBuffer->range_length() == 0) {
+            mInputBuffer->release();
+            mInputBuffer = NULL;
+
+            continue;
+        }
+
+        size_t copy = size_remaining;
+        if (copy > mInputBuffer->range_length()) {
+            copy = mInputBuffer->range_length();
+        }
+
+        memcpy((char *)data + size_done,
+               (const char *)mInputBuffer->data() + mInputBuffer->range_offset(),
+               copy);
+
+        mInputBuffer->set_range(mInputBuffer->range_offset() + copy,
+                                mInputBuffer->range_length() - copy);
+
+        size_done += copy;
+        size_remaining -= copy;
+    }
+
+    // Posted outside mLock to avoid re-entrancy into the observer under lock.
+    if (postSeekComplete) {
+        mObserver->postAudioSeekComplete();
+    }
+
+    return size_done;
+}
+
+// Lock-protected wrapper around getRealTimeUsLocked().
+int64_t LPAPlayer::getRealTimeUs() {
+    Mutex::Autolock autoLock(mLock);
+    return getRealTimeUsLocked();
+}
+
+
+int64_t LPAPlayer::getRealTimeUsLocked(){
+    //Used for AV sync: irrelevant API for LPA.
+    return 0;
+}
+
+// Returns the rendered-position timestamp relevant to the given A2DP state:
+// on the direct path it queries the sink, on the A2DP path it returns 0.
+// NOTE(review): accumulated in a uint64_t but returned as int64_t — confirm
+// the sink timestamp cannot exceed the signed range.
+int64_t LPAPlayer::getTimeStamp(A2DPState state) {
+    uint64_t timestamp = 0;
+    switch (state) {
+    case A2DP_ENABLED:
+    case A2DP_DISCONNECT:
+        ALOGV("Get timestamp for A2DP");
+        break;
+    case A2DP_DISABLED:
+    case A2DP_CONNECT: {
+        mAudioSink->getTimeStamp(&timestamp);
+        break;
+    }
+    default:
+        break;
+    }
+    ALOGV("timestamp %lld ", timestamp);
+    return timestamp;
+}
+
+// Current media position: the frozen mPauseTime while paused, otherwise the
+// seek base plus the sink timestamp for the active output path.
+int64_t LPAPlayer::getMediaTimeUs() {
+    Mutex::Autolock autoLock(mLock);
+    ALOGV("getMediaTimeUs() mPaused %d mSeekTimeUs %lld mPauseTime %lld", mPaused, mSeekTimeUs, mPauseTime);
+    if (mPaused) {
+        return mPauseTime;
+    } else {
+        A2DPState state = mIsA2DPEnabled ? A2DP_ENABLED : A2DP_DISABLED;
+        return (mSeekTimeUs + getTimeStamp(state));
+    }
+}
+
+// Exposes the last real/media position pair recorded by fillBuffer();
+// returns false until both values have been set (-1 means "unset").
+bool LPAPlayer::getMediaTimeMapping(
+    int64_t *realtime_us, int64_t *mediatime_us) {
+    Mutex::Autolock autoLock(mLock);
+
+    *realtime_us = mPositionTimeRealUs;
+    *mediatime_us = mPositionTimeMediaUs;
+
+    return mPositionTimeRealUs != -1 && mPositionTimeMediaUs != -1;
+}
+
+// Asks the decoder thread to exit and joins it; flushes the sink first so a
+// write() blocked inside the audio HAL returns.
+void LPAPlayer::requestAndWaitForDecoderThreadExit() {
+
+    if (!decoderThreadAlive)
+        return;
+    killDecoderThread = true;
+
+    /* Flush the audio sink to unblock the decoder thread
+       if any write to audio HAL is blocked */
+    if (!mReachedOutputEOS && mIsAudioRouted)
+        mAudioSink->flush();
+
+    pthread_cond_signal(&decoder_cv);
+    pthread_join(decoderThread,NULL);
+    ALOGV("decoder thread killed");
+
+}
+
+// Asks the A2DP notification thread to exit and joins it; a no-op when the
+// thread was never started.
+void LPAPlayer::requestAndWaitForA2DPNotificationThreadExit() {
+    if (!a2dpNotificationThreadAlive)
+        return;
+    killA2DPNotificationThread = true;
+    pthread_cond_signal(&a2dp_notification_cv);
+    pthread_join(A2DPNotificationThread,NULL);
+    ALOGV("a2dp notification thread killed");
+}
+
+// Fired LPA_PAUSE_TIMEOUT_USEC after pause(): releases the output session to
+// save power. Serialized against resume() via mResumeLock. The resume
+// position is folded into mSeekTimeUs and replayed as an internal seek —
+// unless the user already seeked, in which case their seek wins and must be
+// reported back, so mInternalSeeking stays false.
+void LPAPlayer::onPauseTimeOut() {
+    ALOGV("onPauseTimeOut");
+    Mutex::Autolock autoLock(mResumeLock);
+    if (!mPauseEventPending) {
+        return;
+    }
+    mPauseEventPending = false;
+    if(!mIsA2DPEnabled) {
+        // 1.) Set seek flags
+        mReachedEOS = false;
+        mReachedOutputEOS = false;
+        if(mSeeking == false) {
+            mSeekTimeUs += getTimeStamp(A2DP_DISABLED);
+            mInternalSeeking = true;
+        } else {
+            //do not update seek time if user has already seeked
+            // to a new position
+            // also seek has to be posted back to player,
+            // so do not set mInternalSeeking flag
+            ALOGV("do not update seek time %lld ", mSeekTimeUs);
+        }
+        ALOGV("newseek time = %lld ", mSeekTimeUs);
+        // 2.) Close routing Session
+        mAudioSink->flush();
+        mAudioSink->stop();
+        mAudioSink->close();
+        mIsAudioRouted = false;
+    }
+
+}
+
+} //namespace android
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index dc8e4a3..488c2a3 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -40,6 +40,9 @@
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
#include <utils/String8.h>
+#ifdef QCOM_HARDWARE
+#include <QCMediaDefs.h>
+#endif
namespace android {
@@ -240,6 +243,9 @@ static const char *FourCC2MIME(uint32_t fourcc) {
case FOURCC('m', 'p', '4', 'a'):
return MEDIA_MIMETYPE_AUDIO_AAC;
+ case FOURCC('.', 'm', 'p', '3'):
+ return MEDIA_MIMETYPE_AUDIO_MPEG;
+
case FOURCC('s', 'a', 'm', 'r'):
return MEDIA_MIMETYPE_AUDIO_AMR_NB;
@@ -257,6 +263,28 @@ static const char *FourCC2MIME(uint32_t fourcc) {
case FOURCC('a', 'v', 'c', '1'):
return MEDIA_MIMETYPE_VIDEO_AVC;
+#ifdef QCOM_HARDWARE
+ case FOURCC('s', 'q', 'c', 'p'):
+ return MEDIA_MIMETYPE_AUDIO_QCELP;
+
+ case FOURCC('s', 'e', 'v', 'c'):
+ return MEDIA_MIMETYPE_AUDIO_EVRC;
+
+ case FOURCC('d', 't', 's', 'c'):
+ case FOURCC('d', 't', 's', 'h'):
+ case FOURCC('d', 't', 's', 'l'):
+ return MEDIA_MIMETYPE_AUDIO_DTS;
+
+ case FOURCC('d', 't', 's', 'e'):
+ return MEDIA_MIMETYPE_AUDIO_DTS_LBR;
+
+ case FOURCC('a', 'c', '-', '3'):
+ return MEDIA_MIMETYPE_AUDIO_AC3;
+
+ case FOURCC('e', 'c', '-', '3'):
+ return MEDIA_MIMETYPE_AUDIO_EAC3;
+#endif
+
default:
CHECK(!"should not be here.");
return NULL;
@@ -921,6 +949,17 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
case FOURCC('m', 'p', '4', 'a'):
case FOURCC('s', 'a', 'm', 'r'):
case FOURCC('s', 'a', 'w', 'b'):
+#ifdef QCOM_HARDWARE
+ case FOURCC('.', 'm', 'p', '3'):
+ case FOURCC('s', 'e', 'v', 'c'):
+ case FOURCC('s', 'q', 'c', 'p'):
+ case FOURCC('d', 't', 's', 'c'):
+ case FOURCC('d', 't', 's', 'h'):
+ case FOURCC('d', 't', 's', 'l'):
+ case FOURCC('d', 't', 's', 'e'):
+ case FOURCC('a', 'c', '-', '3'):
+ case FOURCC('e', 'c', '-', '3'):
+#endif
{
uint8_t buffer[8 + 20];
if (chunk_data_size < (ssize_t)sizeof(buffer)) {
@@ -961,7 +1000,16 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
mLastTrack->meta->setInt32(kKeySampleRate, sample_rate);
off64_t stop_offset = *offset + chunk_size;
- *offset = data_offset + sizeof(buffer);
+#ifdef QCOM_HARDWARE
+ if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_MPEG, FourCC2MIME(chunk_type)) ||
+ !strcasecmp(MEDIA_MIMETYPE_AUDIO_AMR_WB, FourCC2MIME(chunk_type))) {
+ // ESD is not required in mp3
+ // amr wb with damr atom corrupted can cause the clip to not play
+ *offset = stop_offset;
+ } else
+#endif
+ *offset = data_offset + sizeof(buffer);
+
while (*offset < stop_offset) {
status_t err = parseChunk(offset, depth + 1);
if (err != OK) {
@@ -1219,6 +1267,18 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
break;
}
+#ifdef QCOM_HARDWARE
+ case FOURCC('d', 'd', 't', 's'):
+ case FOURCC('d', 'a', 'c', '3'):
+ case FOURCC('d', 'e', 'c', '3'):
+ {
+ //no information need to be passed here, just log and end
+ ALOGV("ddts/dac3/dec3 pass from mpeg4 extractor");
+ *offset += chunk_size;
+ break;
+ }
+#endif
+
case FOURCC('a', 'v', 'c', 'C'):
{
char buffer[256];
@@ -1799,18 +1859,26 @@ status_t MPEG4Extractor::updateAudioTrackInfoFromESDS_MPEG4Audio(
return ERROR_MALFORMED;
}
+#ifdef QCOM_HARDWARE
+ if (objectTypeIndication == 0xA0) {
+ // This isn't MPEG4 audio at all, it's EVRC
+ mLastTrack->meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_EVRC);
+ return OK;
+ }
+#endif
+
if (objectTypeIndication == 0xe1) {
// This isn't MPEG4 audio at all, it's QCELP 14k...
mLastTrack->meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_QCELP);
return OK;
}
- if (objectTypeIndication == 0x6b) {
- // The media subtype is MP3 audio
- // Our software MP3 audio decoder may not be able to handle
- // packetized MP3 audio; for now, lets just return ERROR_UNSUPPORTED
- ALOGE("MP3 track in MP4/3GPP file is not supported");
- return ERROR_UNSUPPORTED;
+ if (objectTypeIndication == 0x6b
+ || objectTypeIndication == 0x69) {
+ // This is mpeg1/2 audio content, set mimetype to mpeg
+ mLastTrack->meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MPEG);
+ ALOGD("objectTypeIndication:0x%x, set mimetype to mpeg ",objectTypeIndication);
+ return OK;
}
const uint8_t *csd;
@@ -2313,6 +2381,9 @@ static bool LegacySniffMPEG4(
}
if (!memcmp(header, "ftyp3gp", 7) || !memcmp(header, "ftypmp42", 8)
+#ifdef QCOM_HARDWARE
+ || !memcmp(header, "ftyp3g2a", 8) || !memcmp(header, "ftyp3g2b", 8) || !memcmp(header, "ftyp3g2c", 8)
+#endif
|| !memcmp(header, "ftyp3gr6", 8) || !memcmp(header, "ftyp3gs6", 8)
|| !memcmp(header, "ftyp3ge6", 8) || !memcmp(header, "ftyp3gg6", 8)
|| !memcmp(header, "ftypisom", 8) || !memcmp(header, "ftypM4V ", 8)
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 8b52e15..2b76660 100755
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -2040,6 +2040,83 @@ status_t MPEG4Writer::Track::threadEntry() {
mGotAllCodecSpecificData = true;
continue;
}
+#if defined(OMAP_ENHANCEMENT) && defined(TARGET_OMAP3)
+ else if (mIsAvc && count < 3) {
+ size_t size = buffer->range_length();
+
+ switch (count) {
+ case 1:
+ {
+ CHECK_EQ(mCodecSpecificData, (void *)NULL);
+ mCodecSpecificData = malloc(size + 8);
+ uint8_t *header = (uint8_t *)mCodecSpecificData;
+ header[0] = 1;
+ header[1] = 0x42; // profile
+ header[2] = 0x80;
+ header[3] = 0x1e; // level
+ header[4] = 0xfc | 3;
+ header[5] = 0xe0 | 1;
+ header[6] = size >> 8;
+ header[7] = size & 0xff;
+ memcpy(&header[8],
+ (const uint8_t *)buffer->data() + buffer->range_offset(),
+ size);
+
+ mCodecSpecificDataSize = size + 8;
+ break;
+ }
+
+ case 2:
+ {
+ size_t offset = mCodecSpecificDataSize;
+ mCodecSpecificDataSize += size + 3;
+ mCodecSpecificData = realloc(mCodecSpecificData, mCodecSpecificDataSize);
+ uint8_t *header = (uint8_t *)mCodecSpecificData;
+ header[offset] = 1;
+ header[offset + 1] = size >> 8;
+ header[offset + 2] = size & 0xff;
+ memcpy(&header[offset + 3],
+ (const uint8_t *)buffer->data() + buffer->range_offset(),
+ size);
+ break;
+ }
+ }
+
+ buffer->release();
+ buffer = NULL;
+
+ continue;
+
+ } else if (mCodecSpecificData == NULL && mIsMPEG4) {
+ const uint8_t *data =
+ (const uint8_t *)buffer->data() + buffer->range_offset();
+
+ const size_t size = buffer->range_length();
+
+ size_t offset = 0;
+ while (offset + 3 < size) {
+ if (data[offset] == 0x00 && data[offset + 1] == 0x00
+ && data[offset + 2] == 0x01 && data[offset + 3] == 0xb6) {
+ break;
+ }
+
+ ++offset;
+ }
+
+ // CHECK(offset + 3 < size);
+ if (offset + 3 >= size) {
+ // XXX assume the entire first chunk of data is the codec specific
+ // data.
+ offset = size;
+ }
+
+ mCodecSpecificDataSize = offset;
+ mCodecSpecificData = malloc(offset);
+ memcpy(mCodecSpecificData, data, offset);
+
+ buffer->set_range(buffer->range_offset() + offset, size - offset);
+ }
+#endif
// Make a deep copy of the MediaBuffer and Metadata and release
// the original as soon as we can
@@ -2098,7 +2175,9 @@ status_t MPEG4Writer::Track::threadEntry() {
}
timestampUs -= previousPausedDurationUs;
+#ifndef OMAP_ENHANCEMENT
CHECK_GE(timestampUs, 0ll);
+#endif
if (!mIsAudio) {
/*
* Composition time: timestampUs
diff --git a/media/libstagefright/MediaExtractor.cpp b/media/libstagefright/MediaExtractor.cpp
index b18c916..1eb5c19 100644
--- a/media/libstagefright/MediaExtractor.cpp
+++ b/media/libstagefright/MediaExtractor.cpp
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2009 The Android Open Source Project
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,6 +18,7 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "MediaExtractor"
#include <utils/Log.h>
+#include <cutils/properties.h>
#include "include/AMRExtractor.h"
#include "include/MP3Extractor.h"
@@ -24,12 +26,16 @@
#include "include/FragmentedMP4Extractor.h"
#include "include/WAVExtractor.h"
#include "include/OggExtractor.h"
+#include "include/PCMExtractor.h"
#include "include/MPEG2PSExtractor.h"
#include "include/MPEG2TSExtractor.h"
#include "include/DRMExtractor.h"
#include "include/WVMExtractor.h"
#include "include/FLACExtractor.h"
#include "include/AACExtractor.h"
+#ifdef QCOM_HARDWARE
+#include "include/ExtendedExtractor.h"
+#endif
#include "matroska/MatroskaExtractor.h"
@@ -54,6 +60,7 @@ uint32_t MediaExtractor::flags() const {
sp<MediaExtractor> MediaExtractor::Create(
const sp<DataSource> &source, const char *mime) {
sp<AMessage> meta;
+ bool bCheckExtendedExtractor = false;
String8 tmp;
if (mime == NULL) {
@@ -100,6 +107,9 @@ sp<MediaExtractor> MediaExtractor::Create(
} else {
ret = new MPEG4Extractor(source);
}
+#ifdef QCOM_ENHANCED_AUDIO
+ bCheckExtendedExtractor = true;
+#endif
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG)) {
ret = new MP3Extractor(source, meta);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_NB)
@@ -122,6 +132,10 @@ sp<MediaExtractor> MediaExtractor::Create(
ret = new AACExtractor(source, meta);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG2PS)) {
ret = new MPEG2PSExtractor(source);
+#ifdef STE_FM
+ } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW)) {
+ ret = new PCMExtractor(source);
+#endif
}
if (ret != NULL) {
@@ -132,6 +146,49 @@ sp<MediaExtractor> MediaExtractor::Create(
}
}
+#ifdef QCOM_HARDWARE
+ //If default extractor created and flag is not set to check extended extractor,
+ // then pass default extractor.
+ if (ret && (!bCheckExtendedExtractor) ) {
+ ALOGD("returning default extractor");
+ return ret;
+ }
+
+ //Create Extended Extractor only if default extractor are not selected
+ ALOGV("Using ExtendedExtractor");
+ sp<MediaExtractor> retextParser = ExtendedExtractor::CreateExtractor(source, mime);
+ //if we came here, it means we do not have to use the default extractor, if created above.
+ bool bUseDefaultExtractor = false;
+
+ if(bCheckExtendedExtractor) {
+ ALOGV("bCheckExtendedExtractor is true");
+ //bCheckExtendedExtractor is true which means default extractor was found
+ // but we want to give preference to extended extractor based on certain
+ // codec type.Set bUseDefaultExtractor to true if extended extractor
+ //does not return specific codec type that we are looking for.
+ bUseDefaultExtractor = true;
+ ALOGV(" bCheckExtendedExtractor is true..checking extended extractor");
+ for (size_t i = 0; (retextParser!=NULL) && (i < retextParser->countTracks()); ++i) {
+ sp<MetaData> meta = retextParser->getTrackMetaData(i);
+ const char *mime;
+ bool success = meta->findCString(kKeyMIMEType, &mime);
+ if( (success == true) && !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS)) {
+ ALOGV("Discarding default extractor and using the extended one");
+ //We found what we were looking for, set bUseDefaultExtractor to false;
+ bUseDefaultExtractor = false;
+ if(ret) {
+ //delete the default extractor as we will be using extended extractor..
+ delete ret;
+ }
+ break;
+ }
+ }
+ }
+ if( (retextParser != NULL) && (!bUseDefaultExtractor) ) {
+ ALOGV("returning retextParser");
+ return retextParser;
+ }
+#endif
return ret;
}
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index 70de174..5988061 100755..100644
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -1,5 +1,9 @@
/*
* Copyright (C) 2009 The Android Open Source Project
+ * Copyright (c) 2010 - 2012, The Linux Foundation. All rights reserved.
+ *
+ * Not a Contribution, Apache license notifications and license are retained
+ * for attribution purposes only.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,6 +23,7 @@
#include <utils/Log.h>
#include "include/AACEncoder.h"
+#include "include/MP3Decoder.h"
#include "include/ESDS.h"
@@ -42,10 +47,46 @@
#include <OMX_Audio.h>
#include <OMX_Component.h>
+#ifdef QCOM_HARDWARE
+#include <QCMediaDefs.h>
+#include <QCMetaData.h>
+#include <QOMX_AudioExtensions.h>
+#include <OMX_QCOMExtns.h>
+#endif
#include "include/avc_utils.h"
+#ifdef USE_SAMSUNG_COLORFORMAT
+#include <sec_format.h>
+#endif
+
+#ifdef USE_TI_CUSTOM_DOMX
+#include <OMX_TI_Video.h>
+#include <OMX_TI_Index.h>
+#include <OMX_TI_IVCommon.h>
+#include <ctype.h>
+#endif
+
namespace android {
+#ifdef USE_SAMSUNG_COLORFORMAT
+static const int OMX_SEC_COLOR_FormatNV12TPhysicalAddress = 0x7F000001;
+static const int OMX_SEC_COLOR_FormatNV12LPhysicalAddress = 0x7F000002;
+static const int OMX_SEC_COLOR_FormatNV12LVirtualAddress = 0x7F000003;
+static const int OMX_SEC_COLOR_FormatNV12Tiled = 0x7FC00002;
+static int calc_plane(int width, int height)
+{
+ int mbX, mbY;
+
+ mbX = (width + 15)/16;
+ mbY = (height + 15)/16;
+
+ /* Alignment for interlaced processing */
+ mbY = (mbY + 1) / 2 * 2;
+
+ return (mbX * 16) * (mbY * 16);
+}
+#endif // USE_SAMSUNG_COLORFORMAT
+
// Treat time out as an error if we have not received any output
// buffers after 3 seconds.
const static int64_t kBufferFilledEventTimeOutNs = 3000000000LL;
@@ -57,6 +98,11 @@ const static int64_t kBufferFilledEventTimeOutNs = 3000000000LL;
// component in question is buggy or not.
const static uint32_t kMaxColorFormatSupported = 1000;
+
+#define FACTORY_CREATE(name) \
+static sp<MediaSource> Make##name(const sp<MediaSource> &source) { \
+ return new name(source); \
+}
#define FACTORY_CREATE_ENCODER(name) \
static sp<MediaSource> Make##name(const sp<MediaSource> &source, const sp<MetaData> &meta) { \
return new name(source, meta); \
@@ -64,6 +110,7 @@ static sp<MediaSource> Make##name(const sp<MediaSource> &source, const sp<MetaDa
#define FACTORY_REF(name) { #name, Make##name },
+FACTORY_CREATE(MP3Decoder)
FACTORY_CREATE_ENCODER(AACEncoder)
static sp<MediaSource> InstantiateSoftwareEncoder(
@@ -87,8 +134,29 @@ static sp<MediaSource> InstantiateSoftwareEncoder(
return NULL;
}
+static sp<MediaSource> InstantiateSoftwareDecoder(
+ const char *name, const sp<MediaSource> &source) {
+ struct FactoryInfo {
+ const char *name;
+ sp<MediaSource> (*CreateFunc)(const sp<MediaSource> &);
+ };
+
+ static const FactoryInfo kFactoryInfo[] = {
+ FACTORY_REF(MP3Decoder)
+ };
+ for (size_t i = 0;
+ i < sizeof(kFactoryInfo) / sizeof(kFactoryInfo[0]); ++i) {
+ if (!strcmp(name, kFactoryInfo[i].name)) {
+ return (*kFactoryInfo[i].CreateFunc)(source);
+ }
+ }
+
+ return NULL;
+}
+
#undef FACTORY_CREATE_ENCODER
#undef FACTORY_REF
+#undef FACTORY_CREATE
#define CODEC_LOGI(x, ...) ALOGI("[%s] "x, mComponentName, ##__VA_ARGS__)
#define CODEC_LOGV(x, ...) ALOGV("[%s] "x, mComponentName, ##__VA_ARGS__)
@@ -133,7 +201,8 @@ static void InitOMXParams(T *params) {
}
static bool IsSoftwareCodec(const char *componentName) {
- if (!strncmp("OMX.google.", componentName, 11)) {
+ if (!strncmp("OMX.google.", componentName, 11)
+ || !strncmp("OMX.PV.", componentName, 7)) {
return true;
}
@@ -205,6 +274,7 @@ void OMXCodec::findMatchingCodecs(
const char *componentName = list->getCodecName(matchIndex);
// If a specific codec is requested, skip the non-matching ones.
+ ALOGV("matchComponentName %s ",matchComponentName);
if (matchComponentName && strcmp(componentName, matchComponentName)) {
continue;
}
@@ -248,6 +318,36 @@ uint32_t OMXCodec::getComponentQuirks(
index, "output-buffers-are-unreadable")) {
quirks |= kOutputBuffersAreUnreadable;
}
+#ifdef QCOM_HARDWARE
+ if (list->codecHasQuirk(
+ index, "requires-loaded-to-idle-after-allocation")) {
+ quirks |= kRequiresLoadedToIdleAfterAllocation;
+ }
+ if (list->codecHasQuirk(
+ index, "requires-global-flush")) {
+ quirks |= kRequiresGlobalFlush;
+ }
+ if (list->codecHasQuirk(
+ index, "requires-wma-pro-component")) {
+ quirks |= kRequiresWMAProComponent;
+ }
+ if (list->codecHasQuirk(
+ index, "defers-output-buffer-allocation")) {
+ quirks |= kDefersOutputBufferAllocation;
+ }
+
+ quirks |= QCOMXCodec::getQCComponentQuirks(list,index);
+#endif
+#ifdef OMAP_ENHANCEMENT
+ if (list->codecHasQuirk(
+ index, "avoid-memcopy-input-recording-frames")) {
+ quirks |= kAvoidMemcopyInputRecordingFrames;
+ }
+ if (list->codecHasQuirk(
+ index, "input-buffer-sizes-are-bogus")) {
+ quirks |= kInputBufferSizesAreBogus;
+ }
+#endif
return quirks;
}
@@ -319,17 +419,38 @@ sp<MediaSource> OMXCodec::Create(
componentName = tmp.c_str();
}
+ sp<MediaSource> softwareCodec;
if (createEncoder) {
- sp<MediaSource> softwareCodec =
- InstantiateSoftwareEncoder(componentName, source, meta);
-
- if (softwareCodec != NULL) {
- ALOGV("Successfully allocated software codec '%s'", componentName);
-
- return softwareCodec;
- }
+ softwareCodec = InstantiateSoftwareEncoder(componentName, source, meta);
+ } else {
+ softwareCodec = InstantiateSoftwareDecoder(componentName, source);
}
-
+ if (softwareCodec != NULL) {
+ ALOGE("Successfully allocated software codec '%s'", componentName);
+ return softwareCodec;
+ }
+#ifdef QCOM_HARDWARE
+ //quirks = getComponentQuirks(componentNameBase, createEncoder);
+ if(quirks & kRequiresWMAProComponent)
+ {
+ int32_t version;
+ CHECK(meta->findInt32(kKeyWMAVersion, &version));
+ if(version==kTypeWMA)
+ {
+ componentName = "OMX.qcom.audio.decoder.wma";
+ }
+ else if(version==kTypeWMAPro)
+ {
+ componentName= "OMX.qcom.audio.decoder.wma10Pro";
+ }
+ else if(version==kTypeWMALossLess)
+ {
+ componentName= "OMX.qcom.audio.decoder.wmaLossLess";
+ }
+ }
+
+ QCOMXCodec::setASFQuirks(quirks, meta, componentName);
+#endif
ALOGV("Attempting to allocate OMX node '%s'", componentName);
if (!createEncoder
@@ -471,8 +592,14 @@ status_t OMXCodec::configureCodec(const sp<MetaData> &meta) {
esds.getCodecSpecificInfo(
&codec_specific_data, &codec_specific_data_size);
- addCodecSpecificData(
+ const char * mime_type;
+ meta->findCString(kKeyMIMEType, &mime_type);
+ if (strncmp(mime_type,
+ MEDIA_MIMETYPE_AUDIO_MPEG,
+ strlen(MEDIA_MIMETYPE_AUDIO_MPEG))) {
+ addCodecSpecificData(
codec_specific_data, codec_specific_data_size);
+ }
} else if (meta->findData(kKeyAVCC, &type, &data, &size)) {
// Parse the AVCDecoderConfigurationRecord
@@ -492,9 +619,25 @@ status_t OMXCodec::configureCodec(const sp<MetaData> &meta) {
CHECK(meta->findData(kKeyVorbisBooks, &type, &data, &size));
addCodecSpecificData(data, size);
+#ifdef QCOM_HARDWARE
+ } else if (meta->findData(kKeyRawCodecSpecificData, &type, &data, &size)) {
+ ALOGV("OMXCodec::configureCodec found kKeyRawCodecSpecificData of size %d\n", size);
+ addCodecSpecificData(data, size);
+ } else {
+ QCOMXCodec::checkAndAddRawFormat(this,meta);
+#endif
}
}
+#ifdef QCOM_HARDWARE
+ status_t errRetVal = QCOMXCodec::configureDIVXCodec( meta, mMIME, mOMX, mNode,
+ (OMXCodec::mIsEncoder ?
+ kPortIndexOutput : kPortIndexInput));
+ if(OK != errRetVal) {
+ return errRetVal;
+ }
+#endif
+
int32_t bitRate = 0;
if (mIsEncoder) {
CHECK(meta->findInt32(kKeyBitRate, &bitRate));
@@ -522,6 +665,42 @@ status_t OMXCodec::configureCodec(const sp<MetaData> &meta) {
CODEC_LOGE("setAACFormat() failed (err = %d)", err);
return err;
}
+
+#ifdef QCOM_HARDWARE
+ uint32_t type;
+ const void *data;
+ size_t size;
+
+ if (meta->findData(kKeyAacCodecSpecificData, &type, &data, &size)) {
+ ALOGV("OMXCodec:: configureCodec found kKeyAacCodecSpecificData of size %d\n", size);
+ addCodecSpecificData(data, size);
+ }
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AC3, mMIME)) {
+ int32_t numChannels, sampleRate;
+ CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
+ CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
+ setAC3Format(numChannels, sampleRate);
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_EAC3, mMIME)) {
+ int32_t numChannels, sampleRate;
+ CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
+ CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
+ setAC3Format(numChannels, sampleRate); //since AC3 and EAC3 use same format at present
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_EVRC, mMIME)) {
+ int32_t numChannels, sampleRate;
+ CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
+ CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
+ setEVRCFormat(numChannels, sampleRate, bitRate);
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_QCELP, mMIME)) {
+ int32_t numChannels, sampleRate;
+ CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
+ CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
+ setQCELPFormat(numChannels, sampleRate, bitRate);
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_WMA, mMIME)) {
+ status_t err = setWMAFormat(meta);
+ if(err!=OK){
+ return err;
+ }
+#endif
} else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_G711_ALAW, mMIME)
|| !strcasecmp(MEDIA_MIMETYPE_AUDIO_G711_MLAW, mMIME)) {
// These are PCM-like formats with a fixed sample rate but
@@ -539,10 +718,37 @@ status_t OMXCodec::configureCodec(const sp<MetaData> &meta) {
CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
setRawAudioFormat(kPortIndexInput, sampleRate, numChannels);
+#ifdef QCOM_HARDWARE
+ } else {
+ status_t err = QCOMXCodec::setQCFormat(meta, mMIME, mOMX, mNode, this, mIsEncoder);
+
+ if(OK != err) {
+ return err;
+ }
+#endif
}
if (!strncasecmp(mMIME, "video/", 6)) {
+#ifdef QCOM_HARDWARE
+ if ((mFlags & kClientNeedsFramebuffer) && !strncmp(mComponentName, "OMX.qcom.", 9)) {
+ ALOGV("Enabling thumbnail mode.");
+ QOMX_ENABLETYPE enableType;
+ OMX_INDEXTYPE indexType;
+
+ status_t err = mOMX->getExtensionIndex(
+ mNode, OMX_QCOM_INDEX_PARAM_VIDEO_SYNCFRAMEDECODINGMODE, &indexType);
+
+ CHECK_EQ(err, (status_t)OK);
+ enableType.bEnable = OMX_TRUE;
+
+ err = mOMX->setParameter(
+ mNode, indexType, &enableType, sizeof(enableType));
+ CHECK_EQ(err, (status_t)OK);
+
+ ALOGV("Thumbnail mode enabled.");
+ }
+#endif
if (mIsEncoder) {
setVideoInputFormat(mMIME, meta);
} else {
@@ -552,6 +758,10 @@ status_t OMXCodec::configureCodec(const sp<MetaData> &meta) {
if (err != OK) {
return err;
}
+
+#ifdef QCOM_HARDWARE
+ QCOMXCodec::setQCSpecificVideoFormat(meta,mOMX,mNode,mComponentName );
+#endif
}
}
@@ -665,6 +875,22 @@ status_t OMXCodec::setVideoPortFormatType(
index, format.eCompressionFormat, format.eColorFormat);
#endif
+ if (!strcmp("OMX.TI.Video.encoder", mComponentName) ||
+ !strcmp("OMX.TI.720P.Encoder", mComponentName)) {
+ if (portIndex == kPortIndexInput
+ && colorFormat == format.eColorFormat) {
+ // eCompressionFormat does not seem right.
+ found = true;
+ break;
+ }
+ if (portIndex == kPortIndexOutput
+ && compressionFormat == format.eCompressionFormat) {
+ // eColorFormat does not seem right.
+ found = true;
+ break;
+ }
+ }
+
if (format.eCompressionFormat == compressionFormat
&& format.eColorFormat == colorFormat) {
found = true;
@@ -691,6 +917,13 @@ status_t OMXCodec::setVideoPortFormatType(
return err;
}
+#ifdef USE_SAMSUNG_COLORFORMAT
+#define ALIGN_TO_8KB(x) ((((x) + (1 << 13) - 1) >> 13) << 13)
+#define ALIGN_TO_32B(x) ((((x) + (1 << 5) - 1) >> 5) << 5)
+#define ALIGN_TO_128B(x) ((((x) + (1 << 7) - 1) >> 7) << 7)
+#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
+#endif
+
static size_t getFrameSize(
OMX_COLOR_FORMATTYPE colorFormat, int32_t width, int32_t height) {
switch (colorFormat) {
@@ -710,8 +943,19 @@ static size_t getFrameSize(
* this part in the future
*/
case OMX_COLOR_FormatAndroidOpaque:
+#ifdef USE_SAMSUNG_COLORFORMAT
+ case OMX_SEC_COLOR_FormatNV12TPhysicalAddress:
+ case OMX_SEC_COLOR_FormatNV12LPhysicalAddress:
+#endif
return (width * height * 3) / 2;
-
+#ifdef USE_SAMSUNG_COLORFORMAT
+ case OMX_SEC_COLOR_FormatNV12LVirtualAddress:
+ return ALIGN((ALIGN(width, 16) * ALIGN(height, 16)), 2048) + ALIGN((ALIGN(width, 16) * ALIGN(height >> 1, 8)), 2048);
+ case OMX_SEC_COLOR_FormatNV12Tiled:
+ static unsigned int frameBufferYSise = ALIGN_TO_8KB(ALIGN_TO_128B(width) * ALIGN_TO_32B(height));
+ static unsigned int frameBufferUVSise = ALIGN_TO_8KB(ALIGN_TO_128B(width) * ALIGN_TO_32B(height/2));
+ return (frameBufferYSise + frameBufferUVSise);
+#endif
default:
CHECK(!"Should not be here. Unsupported color format.");
break;
@@ -755,7 +999,7 @@ status_t OMXCodec::isColorFormatSupported(
// the incremented index (bug 2897413).
CHECK_EQ(index, portFormat.nIndex);
if (portFormat.eColorFormat == colorFormat) {
- CODEC_LOGV("Found supported color format: %d", portFormat.eColorFormat);
+ CODEC_LOGE("Found supported color format: %d", portFormat.eColorFormat);
return OK; // colorFormat is supported!
}
++index;
@@ -792,8 +1036,15 @@ void OMXCodec::setVideoInputFormat(
} else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_H263, mime)) {
compressionFormat = OMX_VIDEO_CodingH263;
} else {
- ALOGE("Not a supported video mime type: %s", mime);
- CHECK(!"Should not be here. Not a supported video mime type.");
+#ifdef QCOM_HARDWARE
+ status_t err = QCOMXCodec::setQCVideoInputFormat(mime, &compressionFormat);
+ if(err != OK) {
+#endif
+ ALOGE("Not a supported video mime type: %s", mime);
+ CHECK(!"Should not be here. Not a supported video mime type.");
+#ifdef QCOM_HARDWARE
+ }
+#endif
}
OMX_COLOR_FORMATTYPE colorFormat;
@@ -1121,7 +1372,12 @@ status_t OMXCodec::setupAVCEncoderParameters(const sp<MetaData>& meta) {
h264type.eLevel = static_cast<OMX_VIDEO_AVCLEVELTYPE>(profileLevel.mLevel);
// XXX
+#ifdef USE_TI_DUCATI_H264_PROFILE
+ if ((strncmp(mComponentName, "OMX.TI.DUCATI1", 14) != 0)
+ && (h264type.eProfile != OMX_VIDEO_AVCProfileBaseline)) {
+#else
if (h264type.eProfile != OMX_VIDEO_AVCProfileBaseline) {
+#endif
ALOGW("Use baseline profile instead of %d for AVC recording",
h264type.eProfile);
h264type.eProfile = OMX_VIDEO_AVCProfileBaseline;
@@ -1189,8 +1445,16 @@ status_t OMXCodec::setVideoOutputFormat(
} else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG2, mime)) {
compressionFormat = OMX_VIDEO_CodingMPEG2;
} else {
- ALOGE("Not a supported video mime type: %s", mime);
- CHECK(!"Should not be here. Not a supported video mime type.");
+#ifdef QCOM_HARDWARE
+ status_t err = QCOMXCodec::setQCVideoOutputFormat(mime,&compressionFormat);
+
+ if(err != OK) {
+#endif
+ ALOGE("Not a supported video mime type: %s", mime);
+ CHECK(!"Should not be here. Not a supported video mime type.");
+#ifdef QCOM_HARDWARE
+ }
+#endif
}
status_t err = setVideoPortFormatType(
@@ -1218,7 +1482,21 @@ status_t OMXCodec::setVideoOutputFormat(
|| format.eColorFormat == OMX_COLOR_FormatCbYCrY
|| format.eColorFormat == OMX_TI_COLOR_FormatYUV420PackedSemiPlanar
|| format.eColorFormat == OMX_QCOM_COLOR_FormatYVU420SemiPlanar
- || format.eColorFormat == OMX_QCOM_COLOR_FormatYUV420PackedSemiPlanar64x32Tile2m8ka);
+ || format.eColorFormat == OMX_QCOM_COLOR_FormatYUV420PackedSemiPlanar64x32Tile2m8ka
+#ifdef USE_SAMSUNG_COLORFORMAT
+ || format.eColorFormat == OMX_SEC_COLOR_FormatNV12TPhysicalAddress
+ || format.eColorFormat == OMX_SEC_COLOR_FormatNV12Tiled
+#endif
+ );
+
+#ifdef USE_SAMSUNG_COLORFORMAT
+ if (!strncmp("OMX.SEC.", mComponentName, 8)) {
+ if (mNativeWindow == NULL)
+ format.eColorFormat = OMX_COLOR_FormatYUV420Planar;
+ else
+ format.eColorFormat = OMX_COLOR_FormatYUV420SemiPlanar;
+ }
+#endif
int32_t colorFormat;
if (meta->findInt32(kKeyColorFormat, &colorFormat)
@@ -1263,7 +1541,11 @@ status_t OMXCodec::setVideoOutputFormat(
#if 1
// XXX Need a (much) better heuristic to compute input buffer sizes.
+#ifdef USE_SAMSUNG_COLORFORMAT
+ const size_t X = 64 * 8 * 1024;
+#else
const size_t X = 64 * 1024;
+#endif
if (def.nBufferSize < X) {
def.nBufferSize = X;
}
@@ -1330,6 +1612,9 @@ OMXCodec::OMXCodec(
mState(LOADED),
mInitialBufferSubmit(true),
mSignalledEOS(false),
+#ifdef QCOM_HARDWARE
+ mFinalStatus(OK),
+#endif
mNoMoreOutputData(false),
mOutputPortSettingsHaveChanged(false),
mSeekTimeUs(-1),
@@ -1378,6 +1663,12 @@ void OMXCodec::setComponentRole(
"audio_decoder.g711mlaw", "audio_encoder.g711mlaw" },
{ MEDIA_MIMETYPE_AUDIO_G711_ALAW,
"audio_decoder.g711alaw", "audio_encoder.g711alaw" },
+#ifdef QCOM_HARDWARE
+ { MEDIA_MIMETYPE_AUDIO_EVRC,
+ "audio_decoder.evrchw", "audio_encoder.evrc" },
+ { MEDIA_MIMETYPE_AUDIO_QCELP,
+ "audio_decoder,qcelp13Hw", "audio_encoder.qcelp13" },
+#endif
{ MEDIA_MIMETYPE_VIDEO_AVC,
"video_decoder.avc", "video_encoder.avc" },
{ MEDIA_MIMETYPE_VIDEO_MPEG4,
@@ -1390,6 +1681,16 @@ void OMXCodec::setComponentRole(
"audio_decoder.raw", "audio_encoder.raw" },
{ MEDIA_MIMETYPE_AUDIO_FLAC,
"audio_decoder.flac", "audio_encoder.flac" },
+#ifdef QCOM_HARDWARE
+ { MEDIA_MIMETYPE_VIDEO_DIVX,
+ "video_decoder.divx", NULL },
+ { MEDIA_MIMETYPE_AUDIO_AC3,
+ "audio_decoder.ac3", NULL },
+ { MEDIA_MIMETYPE_AUDIO_EAC3,
+ "audio_decoder.eac3", NULL },
+ { MEDIA_MIMETYPE_VIDEO_DIVX311,
+ "video_decoder.divx", NULL },
+#endif
};
static const size_t kNumMimeToRole =
@@ -1403,6 +1704,9 @@ void OMXCodec::setComponentRole(
}
if (i == kNumMimeToRole) {
+#ifdef QCOM_HARDWARE
+ QCOMXCodec::checkQCRole(omx, node, isEncoder, mime);
+#endif
return;
}
@@ -1709,11 +2013,34 @@ status_t OMXCodec::allocateOutputBuffersFromNativeWindow() {
return err;
}
+#ifndef USE_SAMSUNG_COLORFORMAT
err = native_window_set_buffers_geometry(
mNativeWindow.get(),
def.format.video.nFrameWidth,
def.format.video.nFrameHeight,
def.format.video.eColorFormat);
+#else
+ OMX_COLOR_FORMATTYPE eColorFormat;
+
+ switch (def.format.video.eColorFormat) {
+ case OMX_SEC_COLOR_FormatNV12TPhysicalAddress:
+ eColorFormat = (OMX_COLOR_FORMATTYPE)HAL_PIXEL_FORMAT_CUSTOM_YCbCr_420_SP_TILED;
+ break;
+ case OMX_COLOR_FormatYUV420SemiPlanar:
+ eColorFormat = (OMX_COLOR_FORMATTYPE)HAL_PIXEL_FORMAT_YCbCr_420_SP;
+ break;
+ case OMX_COLOR_FormatYUV420Planar:
+ default:
+ eColorFormat = (OMX_COLOR_FORMATTYPE)HAL_PIXEL_FORMAT_YCbCr_420_P;
+ break;
+ }
+
+ err = native_window_set_buffers_geometry(
+ mNativeWindow.get(),
+ def.format.video.nFrameWidth,
+ def.format.video.nFrameHeight,
+ eColorFormat);
+#endif
if (err != 0) {
ALOGE("native_window_set_buffers_geometry failed: %s (%d)",
@@ -1758,8 +2085,10 @@ status_t OMXCodec::allocateOutputBuffersFromNativeWindow() {
}
ALOGV("native_window_set_usage usage=0x%lx", usage);
+
err = native_window_set_usage(
mNativeWindow.get(), usage | GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_EXTERNAL_DISP);
+
if (err != 0) {
ALOGE("native_window_set_usage failed: %s (%d)", strerror(-err), -err);
return err;
@@ -2114,6 +2443,14 @@ void OMXCodec::on_message(const omx_message &msg) {
// Buffer could not be released until empty buffer done is called.
if (info->mMediaBuffer != NULL) {
+#ifdef OMAP_ENHANCEMENT
+ if (mIsEncoder &&
+ (mQuirks & kAvoidMemcopyInputRecordingFrames)) {
+ // If zero-copy mode is enabled this will send the
+ // input buffer back to the upstream source.
+ restorePatchedDataPointer(info);
+ }
+#endif
info->mMediaBuffer->release();
info->mMediaBuffer = NULL;
}
@@ -2543,11 +2880,22 @@ void OMXCodec::onCmdComplete(OMX_COMMANDTYPE cmd, OMX_U32 data) {
CODEC_LOGV("FLUSH_DONE(%ld)", portIndex);
- CHECK_EQ((int)mPortStatus[portIndex], (int)SHUTTING_DOWN);
- mPortStatus[portIndex] = ENABLED;
+#ifdef QCOM_HARDWARE
+ if (portIndex == (OMX_U32) -1) {
+ CHECK_EQ((int)mPortStatus[kPortIndexInput], (int)SHUTTING_DOWN);
+ mPortStatus[kPortIndexInput] = ENABLED;
+ CHECK_EQ((int)mPortStatus[kPortIndexOutput], (int)SHUTTING_DOWN);
+ mPortStatus[kPortIndexOutput] = ENABLED;
+ } else {
+#endif
+ CHECK_EQ((int)mPortStatus[portIndex], (int)SHUTTING_DOWN);
+ mPortStatus[portIndex] = ENABLED;
- CHECK_EQ(countBuffersWeOwn(mPortBuffers[portIndex]),
- mPortBuffers[portIndex].size());
+ CHECK_EQ(countBuffersWeOwn(mPortBuffers[portIndex]),
+ mPortBuffers[portIndex].size());
+#ifdef QCOM_HARDWARE
+ }
+#endif
if (mSkipCutBuffer != NULL && mPortStatus[kPortIndexOutput] == ENABLED) {
mSkipCutBuffer->clear();
@@ -2812,21 +3160,30 @@ bool OMXCodec::flushPortAsync(OMX_U32 portIndex) {
CHECK(mState == EXECUTING || mState == RECONFIGURING
|| mState == EXECUTING_TO_IDLE);
- CODEC_LOGV("flushPortAsync(%ld): we own %d out of %d buffers already.",
- portIndex, countBuffersWeOwn(mPortBuffers[portIndex]),
- mPortBuffers[portIndex].size());
+#ifdef QCOM_HARDWARE
+ if (portIndex == (OMX_U32) -1 ) {
+ mPortStatus[kPortIndexInput] = SHUTTING_DOWN;
+ mPortStatus[kPortIndexOutput] = SHUTTING_DOWN;
+ } else {
+#endif
+ CODEC_LOGV("flushPortAsync(%ld): we own %d out of %d buffers already.",
+ portIndex, countBuffersWeOwn(mPortBuffers[portIndex]),
+ mPortBuffers[portIndex].size());
- CHECK_EQ((int)mPortStatus[portIndex], (int)ENABLED);
- mPortStatus[portIndex] = SHUTTING_DOWN;
+ CHECK_EQ((int)mPortStatus[portIndex], (int)ENABLED);
+ mPortStatus[portIndex] = SHUTTING_DOWN;
- if ((mQuirks & kRequiresFlushCompleteEmulation)
- && countBuffersWeOwn(mPortBuffers[portIndex])
- == mPortBuffers[portIndex].size()) {
- // No flush is necessary and this component fails to send a
- // flush-complete event in this case.
+ if ((mQuirks & kRequiresFlushCompleteEmulation)
+ && countBuffersWeOwn(mPortBuffers[portIndex])
+ == mPortBuffers[portIndex].size()) {
+ // No flush is necessary and this component fails to send a
+ // flush-complete event in this case.
- return false;
+ return false;
+ }
+#ifdef QCOM_HARDWARE
}
+#endif
status_t err =
mOMX->sendCommand(mNode, OMX_CommandFlush, portIndex);
@@ -2866,16 +3223,27 @@ void OMXCodec::fillOutputBuffers() {
// end-of-output-stream. If we own all input buffers and also own
// all output buffers and we already signalled end-of-input-stream,
// the end-of-output-stream is implied.
- if (mSignalledEOS
+
+#ifdef QCOM_HARDWARE
+ // NOTE: Thumbnail mode needs a call to fillOutputBuffer in order
+ // to get the decoded frame from the component. Currently,
+ // thumbnail mode calls emptyBuffer with an EOS flag on its first
+ // frame and sets mSignalledEOS to true, so without the check for
+ // !mThumbnailMode, fillOutputBuffer will never be called.
+ if (!((mFlags & kClientNeedsFramebuffer) && !strncmp(mComponentName, "OMX.qcom.", 9))){
+#endif
+ if (mSignalledEOS
&& countBuffersWeOwn(mPortBuffers[kPortIndexInput])
== mPortBuffers[kPortIndexInput].size()
&& countBuffersWeOwn(mPortBuffers[kPortIndexOutput])
== mPortBuffers[kPortIndexOutput].size()) {
- mNoMoreOutputData = true;
- mBufferFilled.signal();
-
- return;
+ mNoMoreOutputData = true;
+ mBufferFilled.signal();
+ return;
+ }
+#ifdef QCOM_HARDWARE
}
+#endif
Vector<BufferInfo> *buffers = &mPortBuffers[kPortIndexOutput];
for (size_t i = 0; i < buffers->size(); ++i) {
@@ -3086,6 +3454,23 @@ bool OMXCodec::drainInputBuffer(BufferInfo *info) {
}
bool releaseBuffer = true;
+#ifdef OMAP_ENHANCEMENT
+ if (mIsEncoder && (mQuirks & kAvoidMemcopyInputRecordingFrames)) {
+ CHECK(mOMXLivesLocally && offset == 0);
+
+ OMX_BUFFERHEADERTYPE *header =
+ (OMX_BUFFERHEADERTYPE *)info->mBuffer;
+
+ CHECK(header->pBuffer == info->mData);
+
+ header->pBuffer =
+ (OMX_U8 *)srcBuffer->data() + srcBuffer->range_offset();
+
+ releaseBuffer = false;
+ info->mMediaBuffer = srcBuffer;
+ } else {
+#endif
+
if (mFlags & kStoreMetaDataInVideoBuffers) {
releaseBuffer = false;
info->mMediaBuffer = srcBuffer;
@@ -3099,13 +3484,53 @@ bool OMXCodec::drainInputBuffer(BufferInfo *info) {
CHECK(info->mMediaBuffer == NULL);
info->mMediaBuffer = srcBuffer;
} else {
+#ifdef USE_SAMSUNG_COLORFORMAT
+ OMX_PARAM_PORTDEFINITIONTYPE def;
+ InitOMXParams(&def);
+ def.nPortIndex = kPortIndexInput;
+
+ status_t err = mOMX->getParameter(mNode, OMX_IndexParamPortDefinition,
+ &def, sizeof(def));
+ CHECK_EQ(err, (status_t)OK);
+
+ if (def.eDomain == OMX_PortDomainVideo) {
+ OMX_VIDEO_PORTDEFINITIONTYPE *videoDef = &def.format.video;
+ switch (videoDef->eColorFormat) {
+ case OMX_SEC_COLOR_FormatNV12LVirtualAddress: {
+ CHECK(srcBuffer->data() != NULL);
+ void *pSharedMem = (void *)(srcBuffer->data());
+ memcpy((uint8_t *)info->mData + offset,
+ (const void *)&pSharedMem, sizeof(void *));
+ break;
+ }
+ default:
+ CHECK(srcBuffer->data() != NULL);
+ memcpy((uint8_t *)info->mData + offset,
+ (const uint8_t *)srcBuffer->data()
+ + srcBuffer->range_offset(),
+ srcBuffer->range_length());
+ break;
+ }
+ } else {
+ CHECK(srcBuffer->data() != NULL);
+ memcpy((uint8_t *)info->mData + offset,
+ (const uint8_t *)srcBuffer->data()
+ + srcBuffer->range_offset(),
+ srcBuffer->range_length());
+ }
+#else
CHECK(srcBuffer->data() != NULL) ;
memcpy((uint8_t *)info->mData + offset,
(const uint8_t *)srcBuffer->data()
+ srcBuffer->range_offset(),
srcBuffer->range_length());
+#endif // USE_SAMSUNG_COLORFORMAT
}
+#ifdef OMAP_ENHANCEMENT
+ }
+#endif
+
int64_t lastBufferTimeUs;
CHECK(srcBuffer->meta_data()->findInt64(kKeyTime, &lastBufferTimeUs));
CHECK(lastBufferTimeUs >= 0);
@@ -3163,6 +3588,20 @@ bool OMXCodec::drainInputBuffer(BufferInfo *info) {
if (signalEOS) {
flags |= OMX_BUFFERFLAG_EOS;
+#ifdef QCOM_HARDWARE
+ } else if ((mFlags & kClientNeedsFramebuffer) && !strncmp(mComponentName, "OMX.qcom.", 9)) {
+ // Because we don't get an EOS after getting the first frame, we
+ // need to notify the component with OMX_BUFFERFLAG_EOS, set
+ // mNoMoreOutputData to false so fillOutputBuffer gets called on
+ // the first output buffer (see comment in fillOutputBuffer), and
+ // mSignalledEOS must be true so drainInputBuffer is not executed
+ // on extra frames. Setting mFinalStatus to ERROR_END_OF_STREAM as
+ // we don't want to return OK and NULL buffer in read.
+ flags |= OMX_BUFFERFLAG_EOS;
+ mNoMoreOutputData = false;
+ mSignalledEOS = true;
+ mFinalStatus = ERROR_END_OF_STREAM;
+#endif
} else {
mNoMoreOutputData = false;
}
@@ -3191,6 +3630,14 @@ bool OMXCodec::drainInputBuffer(BufferInfo *info) {
return false;
}
+ // This component does not ever signal the EOS flag on output buffers,
+ // Thanks for nothing.
+ if (mSignalledEOS && (!strcmp(mComponentName, "OMX.TI.Video.encoder") ||
+ !strcmp(mComponentName, "OMX.TI.720P.Encoder"))) {
+ mNoMoreOutputData = true;
+ mBufferFilled.signal();
+ }
+
info->mStatus = OWNED_BY_COMPONENT;
return true;
@@ -3261,6 +3708,14 @@ status_t OMXCodec::waitForBufferFilled_l() {
return mBufferFilled.wait(mLock);
}
status_t err = mBufferFilled.waitRelative(mLock, kBufferFilledEventTimeOutNs);
+#ifdef QCOM_HARDWARE
+ if ((err == -ETIMEDOUT) && (mPaused == true)){
+ // When the audio playback is paused, the fill buffer may time out
+ // if input data is not available to decode. Hence, the timeout is
+ // treated as a valid case.
+ err = OK;
+ }
+#endif
if (err != OK) {
CODEC_LOGE("Timed out waiting for output buffers: %d/%d",
countBuffersWeOwn(mPortBuffers[kPortIndexInput]),
@@ -3485,6 +3940,182 @@ status_t OMXCodec::setAACFormat(
return OK;
}
+#ifdef QCOM_HARDWARE
+void OMXCodec::setAC3Format(int32_t numChannels, int32_t sampleRate) {
+
+ QOMX_AUDIO_PARAM_AC3TYPE profileAC3;
+ QOMX_AUDIO_PARAM_AC3PP profileAC3PP;
+ OMX_INDEXTYPE indexTypeAC3;
+ OMX_INDEXTYPE indexTypeAC3PP;
+ OMX_PARAM_PORTDEFINITIONTYPE portParam;
+
+ //configure input port
+ CODEC_LOGV("setAC3Format samplerate %d, numChannels %d", sampleRate, numChannels);
+ InitOMXParams(&portParam);
+ portParam.nPortIndex = 0;
+ status_t err = mOMX->getParameter(
+ mNode, OMX_IndexParamPortDefinition, &portParam, sizeof(portParam));
+ CHECK_EQ(err, (status_t)OK);
+ err = mOMX->setParameter(
+ mNode, OMX_IndexParamPortDefinition, &portParam, sizeof(portParam));
+ CHECK_EQ(err, (status_t)OK);
+
+ //configure output port
+ portParam.nPortIndex = 1;
+ err = mOMX->getParameter(
+ mNode, OMX_IndexParamPortDefinition, &portParam, sizeof(portParam));
+ CHECK_EQ(err, (status_t)OK);
+ err = mOMX->setParameter(
+ mNode, OMX_IndexParamPortDefinition, &portParam, sizeof(portParam));
+ CHECK_EQ(err, (status_t)OK);
+
+ err = mOMX->getExtensionIndex(mNode, OMX_QCOM_INDEX_PARAM_AC3TYPE, &indexTypeAC3);
+
+ InitOMXParams(&profileAC3);
+ profileAC3.nPortIndex = kPortIndexInput;
+ err = mOMX->getParameter(mNode, indexTypeAC3, &profileAC3, sizeof(profileAC3));
+ CHECK_EQ(err,(status_t)OK);
+
+ profileAC3.nSamplingRate = sampleRate;
+ profileAC3.nChannels = 2;
+ profileAC3.eChannelConfig = OMX_AUDIO_AC3_CHANNEL_CONFIG_2_0;
+
+ CODEC_LOGE("numChannels = %d, profileAC3.nChannels = %d", numChannels, profileAC3.nChannels);
+
+ err = mOMX->setParameter(mNode, indexTypeAC3, &profileAC3, sizeof(profileAC3));
+ CHECK_EQ(err,(status_t)OK);
+
+ //for output port
+ OMX_AUDIO_PARAM_PCMMODETYPE profilePcm;
+ InitOMXParams(&profilePcm);
+ profilePcm.nPortIndex = kPortIndexOutput;
+ err = mOMX->getParameter(mNode, OMX_IndexParamAudioPcm, &profilePcm, sizeof(profilePcm));
+ CHECK_EQ(err, (status_t)OK);
+
+ profilePcm.nSamplingRate = sampleRate;
+ err = mOMX->setParameter(mNode, OMX_IndexParamAudioPcm, &profilePcm, sizeof(profilePcm));
+ CHECK_EQ(err, (status_t)OK);
+ mOMX->getExtensionIndex(mNode, OMX_QCOM_INDEX_PARAM_AC3PP, &indexTypeAC3PP);
+
+ InitOMXParams(&profileAC3PP);
+ profileAC3PP.nPortIndex = kPortIndexInput;
+ err = mOMX->getParameter(mNode, indexTypeAC3PP, &profileAC3PP, sizeof(profileAC3PP));
+ CHECK_EQ(err, (status_t)OK);
+
+ int i;
+ int channel_routing[6];
+
+ for (i=0; i<6; i++) {
+ channel_routing[i] = -1;
+ }
+ for (i=0; i<6; i++) {
+ profileAC3PP.eChannelRouting[i] = (OMX_AUDIO_AC3_CHANNEL_ROUTING)channel_routing[i];
+ }
+
+ profileAC3PP.eChannelRouting[0] = OMX_AUDIO_AC3_CHANNEL_LEFT;
+ profileAC3PP.eChannelRouting[1] = OMX_AUDIO_AC3_CHANNEL_RIGHT;
+ err = mOMX->setParameter(mNode, indexTypeAC3PP, &profileAC3PP, sizeof(profileAC3PP));
+ CHECK_EQ(err, (status_t)OK);
+
+}
+
+
+status_t OMXCodec::setWMAFormat(const sp<MetaData> &meta)
+{
+ if (mIsEncoder) {
+ CODEC_LOGE("WMA encoding not supported");
+ return OK;
+ } else {
+ int32_t version;
+ OMX_AUDIO_PARAM_WMATYPE paramWMA;
+ QOMX_AUDIO_PARAM_WMA10PROTYPE paramWMA10;
+ CHECK(meta->findInt32(kKeyWMAVersion, &version));
+ int32_t numChannels;
+ int32_t bitRate;
+ int32_t sampleRate;
+ int32_t encodeOptions;
+ int32_t blockAlign;
+ int32_t bitspersample;
+ int32_t formattag;
+ int32_t advencopt1;
+ int32_t advencopt2;
+ int32_t VirtualPktSize;
+ if(version==kTypeWMAPro || version==kTypeWMALossLess) {
+ CHECK(meta->findInt32(kKeyWMABitspersample, &bitspersample));
+ CHECK(meta->findInt32(kKeyWMAFormatTag, &formattag));
+ CHECK(meta->findInt32(kKeyWMAAdvEncOpt1,&advencopt1));
+ CHECK(meta->findInt32(kKeyWMAAdvEncOpt2,&advencopt2));
+ CHECK(meta->findInt32(kKeyWMAVirPktSize,&VirtualPktSize));
+ }
+ if(version==kTypeWMA) {
+ InitOMXParams(&paramWMA);
+ paramWMA.nPortIndex = kPortIndexInput;
+ } else if(version==kTypeWMAPro || version==kTypeWMALossLess) {
+ InitOMXParams(&paramWMA10);
+ paramWMA10.nPortIndex = kPortIndexInput;
+ }
+ CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
+ CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
+ CHECK(meta->findInt32(kKeyBitRate, &bitRate));
+ CHECK(meta->findInt32(kKeyWMAEncodeOpt, &encodeOptions));
+ CHECK(meta->findInt32(kKeyWMABlockAlign, &blockAlign));
+ CODEC_LOGV("Channels: %d, SampleRate: %d, BitRate; %d"
+ "EncodeOptions: %d, blockAlign: %d", numChannels,
+ sampleRate, bitRate, encodeOptions, blockAlign);
+ if(sampleRate>48000 || numChannels>2)
+ {
+ ALOGE("Unsupported samplerate/channels");
+ return ERROR_UNSUPPORTED;
+ }
+ if(version==kTypeWMAPro || version==kTypeWMALossLess)
+ {
+ CODEC_LOGV("Bitspersample: %d, wmaformattag: %d,"
+ "advencopt1: %d, advencopt2: %d VirtualPktSize %d", bitspersample,
+ formattag, advencopt1, advencopt2, VirtualPktSize);
+ }
+ status_t err = OK;
+ OMX_INDEXTYPE index;
+ if(version==kTypeWMA) {
+ err = mOMX->getParameter(
+ mNode, OMX_IndexParamAudioWma, &paramWMA, sizeof(paramWMA));
+ } else if(version==kTypeWMAPro || version==kTypeWMALossLess) {
+ mOMX->getExtensionIndex(mNode,"OMX.Qualcomm.index.audio.wma10Pro",&index);
+ err = mOMX->getParameter(
+ mNode, index, &paramWMA10, sizeof(paramWMA10));
+ }
+ CHECK_EQ(err, (status_t)OK);
+ if(version==kTypeWMA) {
+ paramWMA.nChannels = numChannels;
+ paramWMA.nSamplingRate = sampleRate;
+ paramWMA.nEncodeOptions = encodeOptions;
+ paramWMA.nBitRate = bitRate;
+ paramWMA.nBlockAlign = blockAlign;
+ } else if(version==kTypeWMAPro || version==kTypeWMALossLess) {
+ paramWMA10.nChannels = numChannels;
+ paramWMA10.nSamplingRate = sampleRate;
+ paramWMA10.nEncodeOptions = encodeOptions;
+ paramWMA10.nBitRate = bitRate;
+ paramWMA10.nBlockAlign = blockAlign;
+ }
+ if(version==kTypeWMAPro || version==kTypeWMALossLess) {
+ paramWMA10.advancedEncodeOpt = advencopt1;
+ paramWMA10.advancedEncodeOpt2 = advencopt2;
+ paramWMA10.formatTag = formattag;
+ paramWMA10.validBitsPerSample = bitspersample;
+ paramWMA10.nVirtualPktSize = VirtualPktSize;
+ }
+ if(version==kTypeWMA) {
+ err = mOMX->setParameter(
+ mNode, OMX_IndexParamAudioWma, &paramWMA, sizeof(paramWMA));
+ } else if(version==kTypeWMAPro || version==kTypeWMALossLess) {
+ err = mOMX->setParameter(
+ mNode, index, &paramWMA10, sizeof(paramWMA10));
+ }
+ return err;
+ }
+}
+#endif
+
void OMXCodec::setG711Format(int32_t numChannels) {
CHECK(!mIsEncoder);
setRawAudioFormat(kPortIndexInput, 8000, numChannels);
@@ -3726,19 +4357,32 @@ status_t OMXCodec::stopOmxComponent_l() {
CODEC_LOGV("This component requires a flush before transitioning "
"from EXECUTING to IDLE...");
- bool emulateInputFlushCompletion =
- !flushPortAsync(kPortIndexInput);
+#ifdef QCOM_HARDWARE
+ //DSP supports flushing of ports simultaneously.
+ //Flushing individual port is not supported.
+ if(mQuirks & kRequiresGlobalFlush) {
+ bool emulateFlushCompletion = !flushPortAsync(kPortIndexBoth);
+ if (emulateFlushCompletion) {
+ onCmdComplete(OMX_CommandFlush, kPortIndexBoth);
+ }
+ } else {
+#endif
+ bool emulateInputFlushCompletion =
+ !flushPortAsync(kPortIndexInput);
- bool emulateOutputFlushCompletion =
- !flushPortAsync(kPortIndexOutput);
+ bool emulateOutputFlushCompletion =
+ !flushPortAsync(kPortIndexOutput);
- if (emulateInputFlushCompletion) {
- onCmdComplete(OMX_CommandFlush, kPortIndexInput);
- }
+ if (emulateInputFlushCompletion) {
+ onCmdComplete(OMX_CommandFlush, kPortIndexInput);
+ }
- if (emulateOutputFlushCompletion) {
- onCmdComplete(OMX_CommandFlush, kPortIndexOutput);
+ if (emulateOutputFlushCompletion) {
+ onCmdComplete(OMX_CommandFlush, kPortIndexOutput);
+ }
+#ifdef QCOM_HARDWARE
}
+#endif
} else {
mPortStatus[kPortIndexInput] = SHUTTING_DOWN;
mPortStatus[kPortIndexOutput] = SHUTTING_DOWN;
@@ -3846,16 +4490,39 @@ status_t OMXCodec::read(
CHECK_EQ((int)mState, (int)EXECUTING);
- bool emulateInputFlushCompletion = !flushPortAsync(kPortIndexInput);
- bool emulateOutputFlushCompletion = !flushPortAsync(kPortIndexOutput);
+#ifdef QCOM_HARDWARE
+ //DSP supports flushing of ports simultaneously. Flushing individual port is not supported.
- if (emulateInputFlushCompletion) {
- onCmdComplete(OMX_CommandFlush, kPortIndexInput);
- }
+ if(mQuirks & kRequiresGlobalFlush) {
+ bool emulateFlushCompletion = !flushPortAsync(kPortIndexBoth);
+ if (emulateFlushCompletion) {
+ onCmdComplete(OMX_CommandFlush, kPortIndexBoth);
+ }
+ } else {
+
+ //DSP supports flushing of ports simultaneously.
+ //Flushing individual port is not supported.
+ if(mQuirks & kRequiresGlobalFlush) {
+ bool emulateFlushCompletion = !flushPortAsync(kPortIndexBoth);
+ if (emulateFlushCompletion) {
+ onCmdComplete(OMX_CommandFlush, kPortIndexBoth);
+ }
+ } else {
+#endif
+ bool emulateInputFlushCompletion = !flushPortAsync(kPortIndexInput);
+ bool emulateOutputFlushCompletion = !flushPortAsync(kPortIndexOutput);
+
+ if (emulateInputFlushCompletion) {
+ onCmdComplete(OMX_CommandFlush, kPortIndexInput);
+ }
- if (emulateOutputFlushCompletion) {
- onCmdComplete(OMX_CommandFlush, kPortIndexOutput);
+ if (emulateOutputFlushCompletion) {
+ onCmdComplete(OMX_CommandFlush, kPortIndexOutput);
+ }
+#ifdef QCOM_HARDWARE
+ }
}
+#endif
while (mSeekTimeUs >= 0) {
if ((err = waitForBufferFilled_l()) != OK) {
@@ -4021,7 +4688,22 @@ static const char *colorFormatString(OMX_COLOR_FORMATTYPE type) {
if (type == OMX_TI_COLOR_FormatYUV420PackedSemiPlanar) {
return "OMX_TI_COLOR_FormatYUV420PackedSemiPlanar";
- } else if (type == OMX_QCOM_COLOR_FormatYVU420SemiPlanar) {
+ }
+#ifdef USE_SAMSUNG_COLORFORMAT
+ if (type == OMX_SEC_COLOR_FormatNV12TPhysicalAddress) {
+ return "OMX_SEC_COLOR_FormatNV12TPhysicalAddress";
+ }
+ if (type == OMX_SEC_COLOR_FormatNV12LPhysicalAddress) {
+ return "OMX_SEC_COLOR_FormatNV12LPhysicalAddress";
+ }
+ if (type == OMX_SEC_COLOR_FormatNV12LVirtualAddress) {
+ return "OMX_SEC_COLOR_FormatNV12LVirtualAddress";
+ }
+ if (type == OMX_SEC_COLOR_FormatNV12Tiled) {
+ return "OMX_SEC_COLOR_FormatNV12Tiled";
+ }
+#endif // USE_SAMSUNG_COLORFORMAT
+ else if (type == OMX_QCOM_COLOR_FormatYVU420SemiPlanar) {
return "OMX_QCOM_COLOR_FormatYVU420SemiPlanar";
} else if (type < 0 || (size_t)type >= numNames) {
return "UNKNOWN";
@@ -4418,9 +5100,46 @@ void OMXCodec::initOutputFormat(const sp<MetaData> &inputFormat) {
mOutputFormat->setInt32(kKeyChannelCount, numChannels);
mOutputFormat->setInt32(kKeySampleRate, sampleRate);
mOutputFormat->setInt32(kKeyBitRate, bitRate);
+#ifdef QCOM_HARDWARE
+ } else if (audio_def->eEncoding == OMX_AUDIO_CodingQCELP13 ) {
+ mOutputFormat->setCString(
+ kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_QCELP);
+ int32_t numChannels, sampleRate, bitRate;
+ inputFormat->findInt32(kKeyChannelCount, &numChannels);
+ inputFormat->findInt32(kKeySampleRate, &sampleRate);
+ inputFormat->findInt32(kKeyBitRate, &bitRate);
+ mOutputFormat->setInt32(kKeyChannelCount, numChannels);
+ mOutputFormat->setInt32(kKeySampleRate, sampleRate);
+ mOutputFormat->setInt32(kKeyBitRate, bitRate);
+ } else if (audio_def->eEncoding == OMX_AUDIO_CodingEVRC ) {
+ mOutputFormat->setCString(
+ kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_EVRC);
+ int32_t numChannels, sampleRate, bitRate;
+ inputFormat->findInt32(kKeyChannelCount, &numChannels);
+ inputFormat->findInt32(kKeySampleRate, &sampleRate);
+ inputFormat->findInt32(kKeyBitRate, &bitRate);
+ mOutputFormat->setInt32(kKeyChannelCount, numChannels);
+ mOutputFormat->setInt32(kKeySampleRate, sampleRate);
+ mOutputFormat->setInt32(kKeyBitRate, bitRate);
} else {
- CHECK(!"Should not be here. Unknown audio encoding.");
+ AString mimeType;
+ if(OK == QCOMXCodec::checkQCFormats(audio_def->eEncoding, &mimeType)) {
+ mOutputFormat->setCString(
+ kKeyMIMEType, mimeType.c_str());
+ int32_t numChannels, sampleRate, bitRate;
+ inputFormat->findInt32(kKeyChannelCount, &numChannels);
+ inputFormat->findInt32(kKeySampleRate, &sampleRate);
+ inputFormat->findInt32(kKeyBitRate, &bitRate);
+ mOutputFormat->setInt32(kKeyChannelCount, numChannels);
+ mOutputFormat->setInt32(kKeySampleRate, sampleRate);
+ mOutputFormat->setInt32(kKeyBitRate, bitRate);
+#endif
+ } else {
+ CHECK(!"Should not be here. Unknown audio encoding.");
+ }
+#ifdef QCOM_HARDWARE
}
+#endif
break;
}
@@ -4520,6 +5239,15 @@ status_t OMXCodec::pause() {
////////////////////////////////////////////////////////////////////////////////
+#ifdef OMAP_ENHANCEMENT
+void OMXCodec::restorePatchedDataPointer(BufferInfo *info) {
+ CHECK(mIsEncoder && (mQuirks & kAvoidMemcopyInputRecordingFrames));
+ CHECK(mOMXLivesLocally);
+
+ OMX_BUFFERHEADERTYPE *header = (OMX_BUFFERHEADERTYPE *)info->mBuffer;
+ header->pBuffer = (OMX_U8 *)info->mData;
+}
+#endif
status_t QueryCodecs(
const sp<IOMX> &omx,
const char *mime, bool queryDecoders, bool hwCodecOnly,
@@ -4621,6 +5349,108 @@ status_t QueryCodecs(
return QueryCodecs(omx, mimeType, queryDecoders, false /*hwCodecOnly*/, results);
}
+#ifdef QCOM_HARDWARE
+void OMXCodec::setEVRCFormat(int32_t numChannels, int32_t sampleRate, int32_t bitRate) {
+ if (mIsEncoder) {
+ CHECK(numChannels == 1);
+ //////////////// input port ////////////////////
+ setRawAudioFormat(kPortIndexInput, sampleRate, numChannels);
+ //////////////// output port ////////////////////
+ // format
+ OMX_AUDIO_PARAM_PORTFORMATTYPE format;
+ format.nPortIndex = kPortIndexOutput;
+ format.nIndex = 0;
+ status_t err = OMX_ErrorNone;
+ while (OMX_ErrorNone == err) {
+ CHECK_EQ(mOMX->getParameter(mNode, OMX_IndexParamAudioPortFormat,
+ &format, sizeof(format)), (status_t)OK);
+ if (format.eEncoding == OMX_AUDIO_CodingEVRC) {
+ break;
+ }
+ format.nIndex++;
+ }
+ CHECK_EQ((status_t)OK, err);
+ CHECK_EQ(mOMX->setParameter(mNode, OMX_IndexParamAudioPortFormat,
+ &format, sizeof(format)), (status_t)OK);
+
+ // port definition
+ OMX_PARAM_PORTDEFINITIONTYPE def;
+ InitOMXParams(&def);
+ def.nPortIndex = kPortIndexOutput;
+ def.format.audio.cMIMEType = NULL;
+ CHECK_EQ(mOMX->getParameter(mNode, OMX_IndexParamPortDefinition,
+ &def, sizeof(def)), (status_t)OK);
+ def.format.audio.bFlagErrorConcealment = OMX_TRUE;
+ def.format.audio.eEncoding = OMX_AUDIO_CodingEVRC;
+ CHECK_EQ(mOMX->setParameter(mNode, OMX_IndexParamPortDefinition,
+ &def, sizeof(def)), (status_t)OK);
+
+ // profile
+ OMX_AUDIO_PARAM_EVRCTYPE profile;
+ InitOMXParams(&profile);
+ profile.nPortIndex = kPortIndexOutput;
+ CHECK_EQ(mOMX->getParameter(mNode, OMX_IndexParamAudioEvrc,
+ &profile, sizeof(profile)), (status_t)OK);
+ profile.nChannels = numChannels;
+ CHECK_EQ(mOMX->setParameter(mNode, OMX_IndexParamAudioEvrc,
+ &profile, sizeof(profile)), (status_t)OK);
+ }
+ else{
+ ALOGI("EVRC decoder \n");
+ }
+}
+
+void OMXCodec::setQCELPFormat(int32_t numChannels, int32_t sampleRate, int32_t bitRate) {
+ if (mIsEncoder) {
+ CHECK(numChannels == 1);
+ //////////////// input port ////////////////////
+ setRawAudioFormat(kPortIndexInput, sampleRate, numChannels);
+ //////////////// output port ////////////////////
+ // format
+ OMX_AUDIO_PARAM_PORTFORMATTYPE format;
+ format.nPortIndex = kPortIndexOutput;
+ format.nIndex = 0;
+ status_t err = OMX_ErrorNone;
+ while (OMX_ErrorNone == err) {
+ CHECK_EQ(mOMX->getParameter(mNode, OMX_IndexParamAudioPortFormat,
+ &format, sizeof(format)), (status_t)OK);
+ if (format.eEncoding == OMX_AUDIO_CodingQCELP13) {
+ break;
+ }
+ format.nIndex++;
+ }
+ CHECK_EQ((status_t)OK, err);
+ CHECK_EQ(mOMX->setParameter(mNode, OMX_IndexParamAudioPortFormat,
+ &format, sizeof(format)), (status_t)OK);
+
+ // port definition
+ OMX_PARAM_PORTDEFINITIONTYPE def;
+ InitOMXParams(&def);
+ def.nPortIndex = kPortIndexOutput;
+ def.format.audio.cMIMEType = NULL;
+ CHECK_EQ(mOMX->getParameter(mNode, OMX_IndexParamPortDefinition,
+ &def, sizeof(def)), (status_t)OK);
+ def.format.audio.bFlagErrorConcealment = OMX_TRUE;
+ def.format.audio.eEncoding = OMX_AUDIO_CodingQCELP13;
+ CHECK_EQ(mOMX->setParameter(mNode, OMX_IndexParamPortDefinition,
+ &def, sizeof(def)), (status_t)OK);
+
+ // profile
+ OMX_AUDIO_PARAM_QCELP13TYPE profile;
+ InitOMXParams(&profile);
+ profile.nPortIndex = kPortIndexOutput;
+ CHECK_EQ(mOMX->getParameter(mNode, OMX_IndexParamAudioQcelp13,
+ &profile, sizeof(profile)), (status_t)OK);
+ profile.nChannels = numChannels;
+ CHECK_EQ(mOMX->setParameter(mNode, OMX_IndexParamAudioQcelp13,
+ &profile, sizeof(profile)), (status_t)OK);
+ }
+ else{
+ ALOGI("QCELP decoder \n");
+ }
+}
+#endif
+
// These are supposed be equivalent to the logic in
// "audio_channel_out_mask_from_count".
status_t getOMXChannelMapping(size_t numChannels, OMX_AUDIO_CHANNELTYPE map[]) {
diff --git a/media/libstagefright/PCMExtractor.cpp b/media/libstagefright/PCMExtractor.cpp
new file mode 100644
index 0000000..bb26bcd
--- /dev/null
+++ b/media/libstagefright/PCMExtractor.cpp
@@ -0,0 +1,302 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Author: Andreas Gustafsson (andreas.a.gustafsson@stericsson.com)
+ * for ST-Ericsson
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "PCMExtractor"
+#include <utils/Log.h>
+
+#include "include/PCMExtractor.h"
+
+#include <media/stagefright/MediaBufferGroup.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/foundation/ADebug.h>
+
+namespace android {
+
+/**
+* The default number of channels.
+*/
+static const uint16_t kDefaultNumChannels = 2;
+
+/**
+* The default Sample rate.
+*/
+static const uint32_t kDefaultSampleRate = 48000;
+
+/**
+* Bits per sample.
+*/
+static const uint16_t kDefaultBitsPerSample = 16;
+
+/**
+* The default buffer size.
+*/
+static const uint32_t kDefaultBufferSize = 4800;
+
+/**
+* Buffer duration in ms, to be used for input
+*/
+static const uint16_t kInputBufferDuration = 64;
+
+/**
+* Buffer granulairity in samples to be used for input.
+*/
+static const uint16_t kBufferGranularityInSamples = 16;
+
+struct PCMSource : public MediaSource {
+ PCMSource(
+ const sp<DataSource> &dataSource,
+ const sp<MetaData> &meta,
+ int32_t bitsPerSample,
+ off_t offset, size_t size);
+
+ virtual status_t start(MetaData *params = NULL);
+ virtual status_t stop();
+ virtual sp<MetaData> getFormat();
+
+ virtual status_t read(
+ MediaBuffer **buffer, const ReadOptions *options = NULL);
+
+protected:
+ virtual ~PCMSource();
+
+private:
+ static const size_t kMaxFrameSize;
+
+ sp<DataSource> mDataSource;
+ sp<MetaData> mMeta;
+ int32_t mSampleRate;
+ int32_t mNumChannels;
+ int32_t mBitsPerSample;
+ off_t mOffset;
+ size_t mSize;
+ bool mStarted;
+ MediaBufferGroup *mGroup;
+ off_t mCurrentPos;
+ uint32_t mBufferSize;
+
+ DISALLOW_EVIL_CONSTRUCTORS(PCMSource);
+};
+
+PCMExtractor::PCMExtractor(const sp<DataSource> &source)
+ : mDataSource(source),
+ mValidFormat(false) {
+ mInitCheck = init();
+}
+
+PCMExtractor::~PCMExtractor() {
+}
+
+sp<MetaData> PCMExtractor::getMetaData() {
+ sp<MetaData> meta = new MetaData;
+
+ if (mInitCheck != OK) {
+ return meta;
+ }
+
+ meta->setCString(kKeyMIMEType, "audio/raw");
+
+ return meta;
+}
+
+size_t PCMExtractor::countTracks() {
+ return mInitCheck == OK ? 1 : 0;
+}
+
+sp<MediaSource> PCMExtractor::getTrack(size_t index) {
+ if (mInitCheck != OK || index > 0) {
+ return NULL;
+ }
+
+ return new PCMSource(
+ mDataSource, mTrackMeta,
+ kDefaultBitsPerSample, mDataOffset, mDataSize);
+}
+
+sp<MetaData> PCMExtractor::getTrackMetaData(
+ size_t index, uint32_t flags) {
+ if (mInitCheck != OK || index > 0) {
+ return NULL;
+ }
+
+ return mTrackMeta;
+}
+
+status_t PCMExtractor::init() {
+ mDataOffset = 0;
+ mDataSize = 0;
+ mValidFormat = true;
+ mTrackMeta = new MetaData;mTrackMeta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW);
+ mTrackMeta->setInt32(kKeyChannelCount, kDefaultNumChannels);
+ mTrackMeta->setInt32(kKeySampleRate, kDefaultSampleRate);
+ return OK;
+}
+
+const size_t PCMSource::kMaxFrameSize = 4800;
+
+PCMSource::PCMSource(
+ const sp<DataSource> &dataSource,
+ const sp<MetaData> &meta,
+ int32_t bitsPerSample,
+ off_t offset, size_t size)
+ : mDataSource(dataSource),
+ mMeta(meta),
+ mSampleRate(0),
+ mNumChannels(0),
+ mBitsPerSample(bitsPerSample),
+ mOffset(offset),
+ mSize(size),
+ mStarted(false),
+ mGroup(NULL),
+ mBufferSize(0) {
+ CHECK(mMeta->findInt32(kKeySampleRate, &mSampleRate));
+ CHECK(mMeta->findInt32(kKeyChannelCount, &mNumChannels));
+}
+
+PCMSource::~PCMSource() {
+ if (mStarted) {
+ stop();
+ }
+}
+
+status_t PCMSource::start(MetaData *params) {
+ CHECK(!mStarted);
+
+ size_t size = kDefaultBufferSize;
+
+ if (mSampleRate != 0 && mNumChannels != 0) {
+ mBufferSize = mSampleRate * kInputBufferDuration / 1000 * mNumChannels * 2;
+ size_t granularity = kBufferGranularityInSamples * 2 * mNumChannels;
+ mBufferSize = (mBufferSize / granularity) * granularity;
+ }
+ mGroup = new MediaBufferGroup;
+ mGroup->add_buffer(new MediaBuffer(mBufferSize));
+
+ if (mBitsPerSample == 8) {
+ // As a temporary buffer for 8->16 bit conversion.
+ mGroup->add_buffer(new MediaBuffer(mBufferSize));
+ }
+
+ mCurrentPos = mOffset;
+
+ mStarted = true;
+ return OK;
+}
+
+status_t PCMSource::stop() {
+
+ CHECK(mStarted);
+ delete mGroup;
+ mGroup = NULL;
+
+ mStarted = false;
+ return OK;
+}
+
+sp<MetaData> PCMSource::getFormat() {
+ return mMeta;
+}
+
+status_t PCMSource::read(
+ MediaBuffer **out, const ReadOptions *options) {
+ *out = NULL;
+ int64_t seekTimeUs;
+ ReadOptions::SeekMode seek = ReadOptions::SEEK_CLOSEST_SYNC;
+ if (options != NULL && options->getSeekTo(&seekTimeUs,&seek)) {
+ int64_t pos = (seekTimeUs * mSampleRate) / 1000000 * mNumChannels * 2;
+ if (pos > mSize) {
+ pos = mSize;
+ }
+ mCurrentPos = pos + mOffset;
+ }
+
+ MediaBuffer *buffer;
+ status_t err = mGroup->acquire_buffer(&buffer);
+ if (err != OK) {
+ return err;
+ }
+
+ ssize_t n = mDataSource->readAt(
+ mCurrentPos, buffer->data(), mBufferSize);
+ if (n <= 0) {
+ buffer->release();
+ buffer = NULL;
+ return ERROR_END_OF_STREAM;
+ }
+
+ mCurrentPos += n;
+
+ buffer->set_range(0, n);
+
+ if (mBitsPerSample == 8) {
+ // Convert 8-bit unsigned samples to 16-bit signed.
+
+ MediaBuffer *tmp;
+ CHECK_EQ(mGroup->acquire_buffer(&tmp), (status_t)OK);
+
+ // The new buffer holds the same number of samples, but each
+ // one is 2 bytes wide.
+ tmp->set_range(0, 2 * n);
+
+ int16_t *dst = (int16_t *)tmp->data();
+ const uint8_t *src = (const uint8_t *)buffer->data();
+ while (n-- > 0) {
+ *dst++ = ((int16_t)(*src) - 128) * 256;
+ ++src;
+ }
+
+ buffer->release();
+ buffer = tmp;
+ } else if (mBitsPerSample == 24) {
+ // Convert 24-bit signed samples to 16-bit signed.
+
+ const uint8_t *src =
+ (const uint8_t *)buffer->data() + buffer->range_offset();
+ int16_t *dst = (int16_t *)src;
+
+ size_t numSamples = buffer->range_length() / 3;
+ for (size_t i = 0; i < numSamples; ++i) {
+ int32_t x = (int32_t)(src[0] | src[1] << 8 | src[2] << 16);
+ x = (x << 8) >> 8; // sign extension
+
+ x = x >> 8;
+ *dst++ = (int16_t)x;
+ src += 3;
+ }
+
+ buffer->set_range(buffer->range_offset(), 2 * numSamples);
+ }
+
+ size_t bytesPerSample = mBitsPerSample >> 3;
+
+ buffer->meta_data()->setInt64(
+ kKeyTime,
+ 1000000LL * (mCurrentPos - mOffset)
+ / (mNumChannels * bytesPerSample) / mSampleRate);
+
+
+ *out = buffer;
+
+ return OK;
+}
+
+} // namespace android
diff --git a/media/libstagefright/QCMediaDefs.cpp b/media/libstagefright/QCMediaDefs.cpp
new file mode 100644
index 0000000..5e8b84f
--- /dev/null
+++ b/media/libstagefright/QCMediaDefs.cpp
@@ -0,0 +1,55 @@
+/*Copyright (c) 2012 - 2013, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <QCMediaDefs.h>
+
+namespace android {
+
+const char *MEDIA_MIMETYPE_AUDIO_EVRC = "audio/evrc";
+
+const char *MEDIA_MIMETYPE_VIDEO_WMV = "video/x-ms-wmv";
+const char *MEDIA_MIMETYPE_AUDIO_WMA = "audio/x-ms-wma";
+const char *MEDIA_MIMETYPE_CONTAINER_ASF = "video/x-ms-asf";
+const char *MEDIA_MIMETYPE_VIDEO_DIVX = "video/divx";
+const char *MEDIA_MIMETYPE_AUDIO_AC3 = "audio/ac3";
+const char *MEDIA_MIMETYPE_CONTAINER_AAC = "audio/aac";
+const char *MEDIA_MIMETYPE_CONTAINER_QCP = "audio/vnd.qcelp";
+const char *MEDIA_MIMETYPE_VIDEO_DIVX311 = "video/divx311";
+const char *MEDIA_MIMETYPE_VIDEO_DIVX4 = "video/divx4";
+
+const char *MEDIA_MIMETYPE_CONTAINER_MPEG2 = "video/mp2";
+
+const char *MEDIA_MIMETYPE_CONTAINER_3G2 = "video/3g2";
+const char *MEDIA_MIMETYPE_AUDIO_DTS = "audio/dts";
+
+const char *MEDIA_MIMETYPE_AUDIO_DTS_LBR = "audio/dts-lbr";
+const char *MEDIA_MIMETYPE_AUDIO_EAC3 = "audio/eac3";
+const char *MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS = "audio/amr-wb-plus";
+const char *MEDIA_MIMETYPE_CONTAINER_QCMPEG2TS = "video/qc-mp2ts";
+} // namespace android
+
diff --git a/media/libstagefright/QCOMXCodec.cpp b/media/libstagefright/QCOMXCodec.cpp
new file mode 100644
index 0000000..0d5768a
--- /dev/null
+++ b/media/libstagefright/QCOMXCodec.cpp
@@ -0,0 +1,592 @@
+/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "QCOMXCodec"
+#include <utils/Log.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaCodecList.h>
+
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/QCOMXCodec.h>
+#include <media/stagefright/OMXCodec.h>
+#include <QCMetaData.h>
+#include <QCMediaDefs.h>
+#include <OMX_QCOMExtns.h>
+
+#include <OMX_Component.h>
+#include <QOMX_AudioExtensions.h>
+
+
+namespace android {
+
+uint32_t QCOMXCodec::getQCComponentQuirks(const MediaCodecList *list, size_t index) {
+ uint32_t quirks = 0;
+
+ if (list->codecHasQuirk(
+ index, "requires-wma-pro-component")) {
+ quirks |= kRequiresWMAProComponent;
+ }
+ return quirks;
+}
+
// Selects the QCOM WMA decoder component name from the WMA version stored
// in the track metadata, when the kRequiresWMAProComponent quirk is set.
// Aborts (CHECK) if the quirk is set but kKeyWMAVersion is absent.
//
// NOTE(review): 'componentName' is passed BY VALUE, so the assignments
// below only change this function's local copy -- the caller never sees
// the selected name. The signature is fixed by QCOMXCodec.h, so the defect
// is flagged here rather than fixed; the parameter would need to be
// 'const char **' (or a reference) for the selection to take effect.
// TODO: confirm against the header and callers in OMXCodec.
void QCOMXCodec::setASFQuirks(uint32_t quirks, const sp<MetaData> &meta, const char* componentName) {
    if(quirks & kRequiresWMAProComponent)
    {
        int32_t version;
        CHECK(meta->findInt32(kKeyWMAVersion, &version));
        if(version==kTypeWMA) {
            componentName = "OMX.qcom.audio.decoder.wma";
        } else if(version==kTypeWMAPro) {
            componentName= "OMX.qcom.audio.decoder.wma10Pro";
        } else if(version==kTypeWMALossLess) {
            componentName= "OMX.qcom.audio.decoder.wmaLossLess";
        }
    }
}
+
// Prepares an OMX IL parameter/config struct for an API call: records the
// struct's own size and stamps the IL client version (1.0.0.0), both of
// which components expect to be filled in before get/setParameter.
template<class T>
static void InitOMXParams(T *params) {
    params->nVersion.s.nVersionMajor = 1;
    params->nVersion.s.nVersionMinor = 0;
    params->nVersion.s.nRevision = 0;
    params->nVersion.s.nStep = 0;
    params->nSize = sizeof(T);
}
+
+
+status_t QCOMXCodec::configureDIVXCodec(const sp<MetaData> &meta, char* mime, sp<IOMX> OMXhandle, IOMX::node_id nodeID, int port_index) {
+ status_t err = OK;
+ if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_DIVX, mime) ||
+ !strcasecmp(MEDIA_MIMETYPE_VIDEO_DIVX4, mime) ||
+ !strcasecmp(MEDIA_MIMETYPE_VIDEO_DIVX311, mime)) {
+ ALOGV("Setting the QOMX_VIDEO_PARAM_DIVXTYPE params ");
+ QOMX_VIDEO_PARAM_DIVXTYPE paramDivX;
+ InitOMXParams(&paramDivX);
+ paramDivX.nPortIndex = port_index;
+ int32_t DivxVersion = 0;
+ CHECK(meta->findInt32(kKeyDivXVersion,&DivxVersion));
+ ALOGV("Divx Version Type %d\n",DivxVersion);
+
+ if(DivxVersion == kTypeDivXVer_4) {
+ paramDivX.eFormat = QOMX_VIDEO_DIVXFormat4;
+ } else if(DivxVersion == kTypeDivXVer_5) {
+ paramDivX.eFormat = QOMX_VIDEO_DIVXFormat5;
+ } else if(DivxVersion == kTypeDivXVer_6) {
+ paramDivX.eFormat = QOMX_VIDEO_DIVXFormat6;
+ } else if(DivxVersion == kTypeDivXVer_3_11 ) {
+ paramDivX.eFormat = QOMX_VIDEO_DIVXFormat311;
+ } else {
+ paramDivX.eFormat = QOMX_VIDEO_DIVXFormatUnused;
+ }
+ paramDivX.eProfile = (QOMX_VIDEO_DIVXPROFILETYPE)0; //Not used for now.
+
+ err = OMXhandle->setParameter(nodeID,
+ (OMX_INDEXTYPE)OMX_QcomIndexParamVideoDivx,
+ &paramDivX, sizeof(paramDivX));
+ }
+
+ return err;
+}
+
+void QCOMXCodec::checkAndAddRawFormat(OMXCodec *handle, const sp<MetaData> &meta){
+ uint32_t type;
+ const void *data;
+ size_t size;
+
+ if (meta->findData(kKeyRawCodecSpecificData, &type, &data, &size)) {
+ ALOGV("OMXCodec::configureCodec found kKeyRawCodecSpecificData of size %d\n", size);
+ handle->addCodecSpecificData(data, size);
+ }
+
+}
+
+status_t QCOMXCodec::setQCFormat(const sp<MetaData> &meta, char* mime, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID, OMXCodec *handle, bool isEncoder ) {
+ ALOGV("setQCFormat -- called ");
+ status_t err = OK;
+ if ((!strcasecmp(MEDIA_MIMETYPE_AUDIO_AC3, mime)) ||
+ (!strcasecmp(MEDIA_MIMETYPE_AUDIO_EAC3, mime))){
+ int32_t numChannels, sampleRate;
+ CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
+ CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
+ setAC3Format(numChannels, sampleRate, OMXhandle, nodeID);
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_EVRC, mime)) {
+ int32_t numChannels, sampleRate;
+ CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
+ CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
+ setEVRCFormat(numChannels, sampleRate, OMXhandle, nodeID, handle,isEncoder );
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_QCELP, mime)) {
+ int32_t numChannels, sampleRate;
+ CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
+ CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
+ setQCELPFormat(numChannels, sampleRate, OMXhandle, nodeID, handle,isEncoder);
+ } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_WMA, mime)) {
+ err = setWMAFormat(meta, OMXhandle, nodeID, isEncoder);
+ }
+ return err;
+}
+
+
+void QCOMXCodec::setEVRCFormat(int32_t numChannels, int32_t sampleRate, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID, OMXCodec *handle, bool isEncoder ) {
+ ALOGV("setEVRCFormat -- called ");
+ if (isEncoder) {
+ CHECK(numChannels == 1);
+ //////////////// input port ////////////////////
+ handle->setRawAudioFormat(OMXCodec::kPortIndexInput, sampleRate, numChannels);
+ //////////////// output port ////////////////////
+ // format
+ OMX_AUDIO_PARAM_PORTFORMATTYPE format;
+ format.nPortIndex = OMXCodec::kPortIndexOutput;
+ format.nIndex = 0;
+ status_t err = OMX_ErrorNone;
+ while (OMX_ErrorNone == err) {
+ CHECK_EQ(OMXhandle->getParameter(nodeID, OMX_IndexParamAudioPortFormat,
+ &format, sizeof(format)), (status_t)OK);
+ if (format.eEncoding == OMX_AUDIO_CodingEVRC) {
+ break;
+ }
+ format.nIndex++;
+ }
+ CHECK_EQ((status_t)OK, err);
+ CHECK_EQ(OMXhandle->setParameter(nodeID, OMX_IndexParamAudioPortFormat,
+ &format, sizeof(format)), (status_t)OK);
+
+ // port definition
+ OMX_PARAM_PORTDEFINITIONTYPE def;
+ InitOMXParams(&def);
+ def.nPortIndex = OMXCodec::kPortIndexOutput;
+ def.format.audio.cMIMEType = NULL;
+ CHECK_EQ(OMXhandle->getParameter(nodeID, OMX_IndexParamPortDefinition,
+ &def, sizeof(def)), (status_t)OK);
+ def.format.audio.bFlagErrorConcealment = OMX_TRUE;
+ def.format.audio.eEncoding = OMX_AUDIO_CodingEVRC;
+ CHECK_EQ(OMXhandle->setParameter(nodeID, OMX_IndexParamPortDefinition,
+ &def, sizeof(def)), (status_t)OK);
+
+ // profile
+ OMX_AUDIO_PARAM_EVRCTYPE profile;
+ InitOMXParams(&profile);
+ profile.nPortIndex = OMXCodec::kPortIndexOutput;
+ CHECK_EQ(OMXhandle->getParameter(nodeID, OMX_IndexParamAudioEvrc,
+ &profile, sizeof(profile)), (status_t)OK);
+ profile.nChannels = numChannels;
+ CHECK_EQ(OMXhandle->setParameter(nodeID, OMX_IndexParamAudioEvrc,
+ &profile, sizeof(profile)), (status_t)OK);
+ }
+ else{
+ ALOGI("EVRC decoder \n");
+ }
+}
+
+
+void QCOMXCodec::setQCELPFormat(int32_t numChannels, int32_t sampleRate, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID, OMXCodec *handle, bool isEncoder ) {
+ if (isEncoder) {
+ CHECK(numChannels == 1);
+ //////////////// input port ////////////////////
+ handle->setRawAudioFormat(OMXCodec::kPortIndexInput, sampleRate, numChannels);
+ //////////////// output port ////////////////////
+ // format
+ OMX_AUDIO_PARAM_PORTFORMATTYPE format;
+ format.nPortIndex = OMXCodec::kPortIndexOutput;
+ format.nIndex = 0;
+ status_t err = OMX_ErrorNone;
+ while (OMX_ErrorNone == err) {
+ CHECK_EQ(OMXhandle->getParameter(nodeID, OMX_IndexParamAudioPortFormat,
+ &format, sizeof(format)), (status_t)OK);
+ if (format.eEncoding == OMX_AUDIO_CodingQCELP13) {
+ break;
+ }
+ format.nIndex++;
+ }
+ CHECK_EQ((status_t)OK, err);
+ CHECK_EQ(OMXhandle->setParameter(nodeID, OMX_IndexParamAudioPortFormat,
+ &format, sizeof(format)), (status_t)OK);
+
+ // port definition
+ OMX_PARAM_PORTDEFINITIONTYPE def;
+ InitOMXParams(&def);
+ def.nPortIndex = OMXCodec::kPortIndexOutput;
+ def.format.audio.cMIMEType = NULL;
+ CHECK_EQ(OMXhandle->getParameter(nodeID, OMX_IndexParamPortDefinition,
+ &def, sizeof(def)), (status_t)OK);
+ def.format.audio.bFlagErrorConcealment = OMX_TRUE;
+ def.format.audio.eEncoding = OMX_AUDIO_CodingQCELP13;
+ CHECK_EQ(OMXhandle->setParameter(nodeID, OMX_IndexParamPortDefinition,
+ &def, sizeof(def)), (status_t)OK);
+
+ // profile
+ OMX_AUDIO_PARAM_QCELP13TYPE profile;
+ InitOMXParams(&profile);
+ profile.nPortIndex = OMXCodec::kPortIndexOutput;
+ CHECK_EQ(OMXhandle->getParameter(nodeID, OMX_IndexParamAudioQcelp13,
+ &profile, sizeof(profile)), (status_t)OK);
+ profile.nChannels = numChannels;
+ CHECK_EQ(OMXhandle->setParameter(nodeID, OMX_IndexParamAudioQcelp13,
+ &profile, sizeof(profile)), (status_t)OK);
+ }
+ else {
+ ALOGI("QCELP decoder \n");
+ }
+}
+
+status_t QCOMXCodec::setWMAFormat(const sp<MetaData> &meta, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID, bool isEncoder ) {
+ ALOGV("setWMAFormat Called");
+ if (isEncoder) {
+ ALOGE("WMA encoding not supported");
+ return OK;
+ } else {
+ int32_t version;
+ OMX_AUDIO_PARAM_WMATYPE paramWMA;
+ QOMX_AUDIO_PARAM_WMA10PROTYPE paramWMA10;
+ CHECK(meta->findInt32(kKeyWMAVersion, &version));
+ int32_t numChannels;
+ int32_t bitRate;
+ int32_t sampleRate;
+ int32_t encodeOptions;
+ int32_t blockAlign;
+ int32_t bitspersample;
+ int32_t formattag;
+ int32_t advencopt1;
+ int32_t advencopt2;
+ int32_t VirtualPktSize;
+ if(version==kTypeWMAPro || version==kTypeWMALossLess) {
+ CHECK(meta->findInt32(kKeyWMABitspersample, &bitspersample));
+ CHECK(meta->findInt32(kKeyWMAFormatTag, &formattag));
+ CHECK(meta->findInt32(kKeyWMAAdvEncOpt1,&advencopt1));
+ CHECK(meta->findInt32(kKeyWMAAdvEncOpt2,&advencopt2));
+ CHECK(meta->findInt32(kKeyWMAVirPktSize,&VirtualPktSize));
+ }
+ if(version==kTypeWMA) {
+ InitOMXParams(&paramWMA);
+ paramWMA.nPortIndex = OMXCodec::kPortIndexInput;
+ } else if(version==kTypeWMAPro || version==kTypeWMALossLess) {
+ InitOMXParams(&paramWMA10);
+ paramWMA10.nPortIndex = OMXCodec::kPortIndexInput;
+ }
+ CHECK(meta->findInt32(kKeyChannelCount, &numChannels));
+ CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
+ CHECK(meta->findInt32(kKeyBitRate, &bitRate));
+ CHECK(meta->findInt32(kKeyWMAEncodeOpt, &encodeOptions));
+ CHECK(meta->findInt32(kKeyWMABlockAlign, &blockAlign));
+ ALOGV("Channels: %d, SampleRate: %d, BitRate; %d"
+ "EncodeOptions: %d, blockAlign: %d", numChannels,
+ sampleRate, bitRate, encodeOptions, blockAlign);
+ if(sampleRate>48000 || numChannels>2)
+ {
+ ALOGE("Unsupported samplerate/channels");
+ return ERROR_UNSUPPORTED;
+ }
+ if(version==kTypeWMAPro || version==kTypeWMALossLess)
+ {
+ ALOGV("Bitspersample: %d, wmaformattag: %d,"
+ "advencopt1: %d, advencopt2: %d VirtualPktSize %d", bitspersample,
+ formattag, advencopt1, advencopt2, VirtualPktSize);
+ }
+ status_t err = OK;
+ OMX_INDEXTYPE index;
+ if(version==kTypeWMA) {
+ err = OMXhandle->getParameter(
+ nodeID, OMX_IndexParamAudioWma, &paramWMA, sizeof(paramWMA));
+ } else if(version==kTypeWMAPro || version==kTypeWMALossLess) {
+ OMXhandle->getExtensionIndex(nodeID,"OMX.Qualcomm.index.audio.wma10Pro",&index);
+ err = OMXhandle->getParameter(
+ nodeID, index, &paramWMA10, sizeof(paramWMA10));
+ }
+ CHECK_EQ(err, (status_t)OK);
+ if(version==kTypeWMA) {
+ paramWMA.nChannels = numChannels;
+ paramWMA.nSamplingRate = sampleRate;
+ paramWMA.nEncodeOptions = encodeOptions;
+ paramWMA.nBitRate = bitRate;
+ paramWMA.nBlockAlign = blockAlign;
+ } else if(version==kTypeWMAPro || version==kTypeWMALossLess) {
+ paramWMA10.nChannels = numChannels;
+ paramWMA10.nSamplingRate = sampleRate;
+ paramWMA10.nEncodeOptions = encodeOptions;
+ paramWMA10.nBitRate = bitRate;
+ paramWMA10.nBlockAlign = blockAlign;
+ }
+ if(version==kTypeWMAPro || version==kTypeWMALossLess) {
+ paramWMA10.advancedEncodeOpt = advencopt1;
+ paramWMA10.advancedEncodeOpt2 = advencopt2;
+ paramWMA10.formatTag = formattag;
+ paramWMA10.validBitsPerSample = bitspersample;
+ paramWMA10.nVirtualPktSize = VirtualPktSize;
+ }
+ if(version==kTypeWMA) {
+ err = OMXhandle->setParameter(
+ nodeID, OMX_IndexParamAudioWma, &paramWMA, sizeof(paramWMA));
+ } else if(version==kTypeWMAPro || version==kTypeWMALossLess) {
+ err = OMXhandle->setParameter(
+ nodeID, index, &paramWMA10, sizeof(paramWMA10));
+ }
+ return err;
+ }
+ return OK;
+}
+
+
// Configures the OMX decoder node for AC3 playback: refreshes both port
// definitions, programs the AC3 input profile, sets the PCM output sample
// rate, and configures AC3 post-processing channel routing for stereo.
// All parameter reads/writes after the first are CHECK-asserted.
// NOTE: the output is hard-wired to 2 channels / 2.0 channel config
// regardless of 'numChannels' (see assignments below) -- i.e. multichannel
// AC3 is downmixed by the component to stereo.
void QCOMXCodec::setAC3Format(int32_t numChannels, int32_t sampleRate, sp<IOMX> OMXhandle,
                              IOMX::node_id nodeID) {
    QOMX_AUDIO_PARAM_AC3TYPE profileAC3;
    QOMX_AUDIO_PARAM_AC3PP profileAC3PP;
    OMX_INDEXTYPE indexTypeAC3;
    OMX_INDEXTYPE indexTypeAC3PP;
    OMX_PARAM_PORTDEFINITIONTYPE portParam;

    //configure input port
    // NOTE(review): this get/set pair writes back the port definition
    // unmodified; presumably a deliberate refresh handshake with the
    // component -- confirm whether it can be dropped.
    ALOGV("setAC3Format samplerate %d, numChannels %d", sampleRate, numChannels);
    InitOMXParams(&portParam);
    portParam.nPortIndex = 0;
    status_t err = OMXhandle->getParameter(
            nodeID, OMX_IndexParamPortDefinition, &portParam, sizeof(portParam));
    CHECK_EQ(err, (status_t)OK);
    err = OMXhandle->setParameter(
            nodeID, OMX_IndexParamPortDefinition, &portParam, sizeof(portParam));
    CHECK_EQ(err, (status_t)OK);

    //configure output port (same unmodified get/set roundtrip as above)
    portParam.nPortIndex = 1;
    err = OMXhandle->getParameter(
            nodeID, OMX_IndexParamPortDefinition, &portParam, sizeof(portParam));
    CHECK_EQ(err, (status_t)OK);
    err = OMXhandle->setParameter(
            nodeID, OMX_IndexParamPortDefinition, &portParam, sizeof(portParam));
    CHECK_EQ(err, (status_t)OK);

    // NOTE(review): the status of this getExtensionIndex call is assigned
    // but not checked before indexTypeAC3 is used below -- on failure an
    // indeterminate index is passed to getParameter.
    err = OMXhandle->getExtensionIndex(nodeID, OMX_QCOM_INDEX_PARAM_AC3TYPE, &indexTypeAC3);

    // Program the AC3 input profile: sample rate plus forced 2.0 output.
    InitOMXParams(&profileAC3);
    profileAC3.nPortIndex = OMXCodec::kPortIndexInput;
    err = OMXhandle->getParameter(nodeID, indexTypeAC3, &profileAC3, sizeof(profileAC3));
    CHECK_EQ(err,(status_t)OK);

    profileAC3.nSamplingRate  =  sampleRate;
    profileAC3.nChannels      =  2;
    profileAC3.eChannelConfig =  OMX_AUDIO_AC3_CHANNEL_CONFIG_2_0;

    ALOGV("numChannels = %d, profileAC3.nChannels = %d", numChannels, profileAC3.nChannels);

    err = OMXhandle->setParameter(nodeID, indexTypeAC3, &profileAC3, sizeof(profileAC3));
    CHECK_EQ(err,(status_t)OK);

    //for output port: match the PCM output sample rate to the stream.
    OMX_AUDIO_PARAM_PCMMODETYPE profilePcm;
    InitOMXParams(&profilePcm);
    profilePcm.nPortIndex = OMXCodec::kPortIndexOutput;
    err = OMXhandle->getParameter(nodeID, OMX_IndexParamAudioPcm, &profilePcm, sizeof(profilePcm));
    CHECK_EQ(err, (status_t)OK);

    profilePcm.nSamplingRate  =  sampleRate;
    err = OMXhandle->setParameter(nodeID, OMX_IndexParamAudioPcm, &profilePcm, sizeof(profilePcm));
    CHECK_EQ(err, (status_t)OK);
    // NOTE(review): status of this getExtensionIndex call is also unchecked.
    OMXhandle->getExtensionIndex(nodeID, OMX_QCOM_INDEX_PARAM_AC3PP, &indexTypeAC3PP);

    InitOMXParams(&profileAC3PP);
    profileAC3PP.nPortIndex = OMXCodec::kPortIndexInput;
    err = OMXhandle->getParameter(nodeID, indexTypeAC3PP, &profileAC3PP, sizeof(profileAC3PP));
    CHECK_EQ(err, (status_t)OK);

    int i;
    int channel_routing[6];

    // Initialize all six routing slots to "unrouted" (-1), then map the
    // first two slots to left/right for stereo output.
    for (i=0; i<6; i++) {
        channel_routing[i] = -1;
    }
    for (i=0; i<6; i++) {
        profileAC3PP.eChannelRouting[i] =  (OMX_AUDIO_AC3_CHANNEL_ROUTING)channel_routing[i];
    }

    profileAC3PP.eChannelRouting[0] =  OMX_AUDIO_AC3_CHANNEL_LEFT;
    profileAC3PP.eChannelRouting[1] =  OMX_AUDIO_AC3_CHANNEL_RIGHT;
    err = OMXhandle->setParameter(nodeID, indexTypeAC3PP, &profileAC3PP, sizeof(profileAC3PP));
    CHECK_EQ(err, (status_t)OK);
}
+
+
+status_t QCOMXCodec::setQCVideoInputFormat(const char *mime, OMX_VIDEO_CODINGTYPE *compressionFormat) {
+ status_t retVal = OK;
+ if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_DIVX, mime)){
+ *compressionFormat= (OMX_VIDEO_CODINGTYPE)QOMX_VIDEO_CodingDivx;
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_DIVX4, mime)){
+ *compressionFormat= (OMX_VIDEO_CODINGTYPE)QOMX_VIDEO_CodingDivx;
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_DIVX311, mime)){
+ *compressionFormat= (OMX_VIDEO_CODINGTYPE)QOMX_VIDEO_CodingDivx;
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_WMV, mime)){
+ *compressionFormat = OMX_VIDEO_CodingWMV;
+ } else if (!strcasecmp(MEDIA_MIMETYPE_CONTAINER_MPEG2, mime)){
+ *compressionFormat = OMX_VIDEO_CodingMPEG2;
+ } else {
+ retVal = BAD_VALUE;
+ }
+
+ return retVal;
+}
+
+status_t QCOMXCodec::setQCVideoOutputFormat(const char *mime, OMX_VIDEO_CODINGTYPE *compressionFormat) {
+ status_t retVal = OK;
+ if(!strcasecmp(MEDIA_MIMETYPE_VIDEO_DIVX, mime)) {
+ *compressionFormat = (OMX_VIDEO_CODINGTYPE)QOMX_VIDEO_CodingDivx;
+ } else if(!strcasecmp(MEDIA_MIMETYPE_VIDEO_DIVX311, mime)) {
+ *compressionFormat = (OMX_VIDEO_CODINGTYPE)QOMX_VIDEO_CodingDivx;
+ } else if(!strcasecmp(MEDIA_MIMETYPE_VIDEO_DIVX4, mime)) {
+ *compressionFormat = (OMX_VIDEO_CODINGTYPE)QOMX_VIDEO_CodingDivx;
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_WMV, mime)){
+ *compressionFormat = OMX_VIDEO_CodingWMV;
+ } else {
+ retVal = BAD_VALUE;
+ }
+ return retVal;
+}
+
+
+void QCOMXCodec::checkQCRole( const sp<IOMX> &omx, IOMX::node_id node,
+ bool isEncoder, const char *mime){
+ ALOGV("checkQCRole Called");
+ struct MimeToRole {
+ const char *mime;
+ const char *decoderRole;
+ const char *encoderRole;
+ };
+
+ static const MimeToRole kQCMimeToRole[] = {
+ { MEDIA_MIMETYPE_AUDIO_EVRC,
+ "audio_decoder.evrchw", "audio_encoder.evrc" },
+ { MEDIA_MIMETYPE_AUDIO_QCELP,
+ "audio_decoder,qcelp13Hw", "audio_encoder.qcelp13" },
+ { MEDIA_MIMETYPE_VIDEO_DIVX,
+ "video_decoder.divx", NULL },
+ { MEDIA_MIMETYPE_AUDIO_AC3,
+ "audio_decoder.ac3", NULL },
+ { MEDIA_MIMETYPE_VIDEO_DIVX311,
+ "video_decoder.divx", NULL },
+ };
+
+ static const size_t kNumMimeToRole =
+ sizeof(kQCMimeToRole) / sizeof(kQCMimeToRole[0]);
+
+ size_t i;
+ for (i = 0; i < kNumMimeToRole; ++i) {
+ if (!strcasecmp(mime, kQCMimeToRole[i].mime)) {
+ break;
+ }
+ }
+
+ if (i == kNumMimeToRole) {
+ return;
+ }
+
+ const char *role =
+ isEncoder ? kQCMimeToRole[i].encoderRole
+ : kQCMimeToRole[i].decoderRole;
+
+ if (role != NULL) {
+ OMX_PARAM_COMPONENTROLETYPE roleParams;
+ InitOMXParams(&roleParams);
+
+ strncpy((char *)roleParams.cRole,
+ role, OMX_MAX_STRINGNAME_SIZE - 1);
+
+ roleParams.cRole[OMX_MAX_STRINGNAME_SIZE - 1] = '\0';
+
+ status_t err = omx->setParameter(
+ node, OMX_IndexParamStandardComponentRole,
+ &roleParams, sizeof(roleParams));
+
+ if (err != OK) {
+ ALOGW("Failed to set standard component role '%s'.", role);
+ }
+ }
+
+}
+
+status_t QCOMXCodec::checkQCFormats(int format, AString* meta){
+ ALOGV("checkQCFormats called");
+ status_t retVal = OK;
+ if (format == OMX_AUDIO_CodingQCELP13 ) {
+ *meta = MEDIA_MIMETYPE_AUDIO_QCELP;
+ } else if(format == OMX_AUDIO_CodingEVRC ) {
+ *meta = MEDIA_MIMETYPE_AUDIO_EVRC;
+ } else {
+ retVal = BAD_VALUE;
+ }
+ return retVal;
+}
+
+void QCOMXCodec::setQCSpecificVideoFormat(const sp<MetaData> &meta, sp<IOMX> OMXhandle,
+ IOMX::node_id nodeID, char* componentName ) {
+ int32_t arbitraryMode = 1;
+ bool success = meta->findInt32(kKeyUseArbitraryMode, &arbitraryMode);
+ bool useArbitraryMode = true;
+ if (success) {
+ useArbitraryMode = arbitraryMode ? true : false;
+ }
+
+ if (useArbitraryMode) {
+ ALOGI("Decoder should be in arbitrary mode");
+ // Is it required to set OMX_QCOM_FramePacking_Arbitrary ??
+ } else{
+ ALOGI("Enable frame by frame mode");
+ OMX_QCOM_PARAM_PORTDEFINITIONTYPE portFmt;
+ portFmt.nPortIndex = OMXCodec::kPortIndexInput;
+ portFmt.nFramePackingFormat = OMX_QCOM_FramePacking_OnlyOneCompleteFrame;
+ status_t err = OMXhandle->setParameter(
+ nodeID, (OMX_INDEXTYPE)OMX_QcomIndexPortDefn, (void *)&portFmt, sizeof(portFmt));
+ if(err != OK) {
+ ALOGW("Failed to set frame packing format on component");
+ }
+ }
+
+ // Enable timestamp reordering only for AVI/mpeg4 and vc1 clips
+ const char *fileFormat;
+ success = meta->findCString(kKeyFileFormat, &fileFormat);
+ if (!strcmp(componentName, "OMX.qcom.video.decoder.vc1") ||
+ (success && !strncmp(fileFormat, "video/avi", 9))) {
+ ALOGI("Enabling timestamp reordering");
+ QOMX_INDEXTIMESTAMPREORDER reorder;
+ InitOMXParams(&reorder);
+ reorder.nPortIndex = OMXCodec::kPortIndexOutput;
+ reorder.bEnable = OMX_TRUE;
+ status_t err = OMXhandle->setParameter(nodeID,
+ (OMX_INDEXTYPE)OMX_QcomIndexParamEnableTimeStampReorder,
+ (void *)&reorder, sizeof(reorder));
+
+ if(err != OK) {
+ ALOGW("Failed to enable timestamp reordering");
+ }
+ }
+}
+
+}
diff --git a/media/libstagefright/StagefrightMediaScanner.cpp b/media/libstagefright/StagefrightMediaScanner.cpp
index b7cf96e..510252a 100644
--- a/media/libstagefright/StagefrightMediaScanner.cpp
+++ b/media/libstagefright/StagefrightMediaScanner.cpp
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2009 The Android Open Source Project
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -42,7 +43,12 @@ static bool FileHasAcceptableExtension(const char *extension) {
".mpeg", ".ogg", ".mid", ".smf", ".imy", ".wma", ".aac",
".wav", ".amr", ".midi", ".xmf", ".rtttl", ".rtx", ".ota",
".mkv", ".mka", ".webm", ".ts", ".fl", ".flac", ".mxmf",
- ".avi", ".mpeg", ".mpg"
+ ".avi", ".mpg",
+#ifndef QCOM_HARDWARE
+ ".mpeg"
+#else
+ ".qcp", ".awb", ".ac3", ".dts", ".wmv"
+#endif
};
static const size_t kNumValidExtensions =
sizeof(kValidExtensions) / sizeof(kValidExtensions[0]);
diff --git a/media/libstagefright/TunnelPlayer.cpp b/media/libstagefright/TunnelPlayer.cpp
new file mode 100644
index 0000000..564f916
--- /dev/null
+++ b/media/libstagefright/TunnelPlayer.cpp
@@ -0,0 +1,904 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ * Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
+ * Not a Contribution, Apache license notifications and license are retained
+ * for attribution purposes only.
+ *
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+//#define LOG_NDDEBUG 0
+#define LOG_TAG "TunnelPlayer"
+#include <utils/Log.h>
+#include <utils/threads.h>
+
+#include <signal.h>
+#include <sys/prctl.h>
+#include <sys/resource.h>
+#include <sys/poll.h>
+#include <sys/eventfd.h>
+#include <binder/IPCThreadState.h>
+#include <media/AudioTrack.h>
+
+#include <media/stagefright/TunnelPlayer.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/MediaErrors.h>
+
+#include <hardware_legacy/power.h>
+
+#include <linux/unistd.h>
+
+#include "include/AwesomePlayer.h"
+#include <powermanager/PowerManager.h>
+
+static const char mName[] = "TunnelPlayer";
+#define MEM_METADATA_SIZE 64
+#define MEM_PADDING 64
+#define MEM_BUFFER_SIZE (256*1024 - MEM_METADATA_SIZE)
+#define MEM_BUFFER_COUNT 4
+#define TUNNEL_BUFFER_TIME 1500000
+
+namespace android {
+int TunnelPlayer::mTunnelObjectsAlive = 0;
+
// Constructs a tunnel-mode audio player on top of AudioPlayer.
// Playback-state members are reset here; the decode format defaults to MP3
// and is refined from the source MIME type in start(). 'initCheck' is
// unconditionally set to true. 'hasVideo' records whether a video track
// accompanies this audio track. The AudioFlinger client registration and
// power-manager death recipient are currently disabled (commented out).
TunnelPlayer::TunnelPlayer(
        const sp<MediaPlayerBase::AudioSink> &audioSink, bool &initCheck,
        AwesomePlayer *observer, bool hasVideo)
:AudioPlayer(audioSink,observer),
mPositionTimeMediaUs(-1),
mPositionTimeRealUs(-1),
mInternalSeeking(false),
mStarted(false),
mA2DPEnabled(false),
mSampleRate(0),
mLatencyUs(0),
mFrameSize(0),
mNumFramesPlayed(0),
mNumFramesPlayedSysTimeUs(0),
mInputBuffer(NULL),
mSeeking(false),
mReachedEOS(false),
mReachedOutputEOS(false),
mFinalStatus(OK),
mSeekTimeUs(0),
mPauseTime(0),
mIsFirstBuffer(false),
mFirstBufferResult(OK),
mFirstBuffer(NULL),
mAudioSink(audioSink),
mObserver(observer) {
    ALOGD("TunnelPlayer::TunnelPlayer()");
    mTunnelObjectsAlive++;
    numChannels = 0;
    mPaused = false;
    mIsA2DPEnabled = false;
    mAudioFlinger = NULL;
    mAudioFlingerClient = NULL;
    // Default decode format; overwritten in start() for AAC/AMR-WB(+).
    mFormat = AUDIO_FORMAT_MP3;
    // Event queue used for the deferred pause-timeout event below.
    mQueue.start();
    mQueueStarted = true;
    mPauseEvent = new TunnelEvent(this, &TunnelPlayer::onPauseTimeOut);
    mPauseEventPending = false;

    //getAudioFlinger();
    //ALOGD("Registering client with AudioFlinger");
    //mAudioFlinger->registerClient(mAudioFlingerClient);

    mSeekTimeUs = 0;
    mIsAudioRouted = false;

    mHasVideo = hasVideo;
    initCheck = true;

    //mDeathRecipient = new PMDeathRecipient(this);
}
// Intended to take a partial wakelock from the PowerManager service so the
// CPU stays up during tunneled playback. The entire implementation is
// currently commented out, making this a no-op stub.
void TunnelPlayer::acquireWakeLock()
{
    /*Mutex::Autolock _l(pmLock);

    if (mPowerManager == 0) {
        // use checkService() to avoid blocking if power service is not up yet
        sp<IBinder> binder =
            defaultServiceManager()->checkService(String16("power"));
        if (binder == 0) {
            ALOGW("Thread %s cannot connect to the power manager service", mName);
        } else {
            mPowerManager = interface_cast<IPowerManager>(binder);
            binder->linkToDeath(mDeathRecipient);
        }
    }
    if (mPowerManager != 0 && mWakeLockToken == 0) {
        sp<IBinder> binder = new BBinder();
        status_t status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK,
                                                         binder,
                                                         String16(mName));
        if (status == NO_ERROR) {
            mWakeLockToken = binder;
        }
        ALOGV("acquireWakeLock() %s status %d", mName, status);
    }*/
}
+
// Counterpart of acquireWakeLock(): would release the wakelock token held
// with the PowerManager service. The implementation is currently commented
// out, making this a no-op stub (still called from the destructor and
// clearPowerManager()).
void TunnelPlayer::releaseWakeLock()
{
    /*Mutex::Autolock _l(pmLock);

    if (mWakeLockToken != 0) {
        ALOGV("releaseWakeLock() %s", mName);
        if (mPowerManager != 0) {
            mPowerManager->releaseWakeLock(mWakeLockToken, 0);
        }
        mWakeLockToken.clear();
    }*/
}
+
// Binder-death cleanup for the PowerManager connection: release any held
// wakelock (currently a no-op stub) and drop the cached service proxy,
// all under pmLock so later acquire attempts reconnect cleanly.
void TunnelPlayer::clearPowerManager()
{
    Mutex::Autolock _l(pmLock);
    releaseWakeLock();
    mPowerManager.clear();
}
+
// Invoked when the PowerManager service's binder dies: drop the parent's
// cached service handle (and wakelock token) so it can be re-acquired.
// 'who' is unused; there is only one binder this recipient watches.
void TunnelPlayer::PMDeathRecipient::binderDied(const wp<IBinder>& who)
{
    parentClass->clearPowerManager();
    ALOGW("power manager service died !!!");
}
+
// Tears down the player: stops the event queue first (so no pause-timeout
// event fires mid-destruction), resets playback state, then releases the
// wakelock and unhooks the PowerManager death recipient if a connection
// was ever made (acquireWakeLock is currently stubbed out, so normally
// mPowerManager is 0).
TunnelPlayer::~TunnelPlayer() {
    ALOGD("TunnelPlayer::~TunnelPlayer()");
    if (mQueueStarted) {
        mQueue.stop();
    }

    reset();
    //mAudioFlinger->deregisterClient(mAudioFlingerClient);
    mTunnelObjectsAlive--;

    releaseWakeLock();
    if (mPowerManager != 0) {
        sp<IBinder> binder = mPowerManager->asBinder();
        binder->unlinkToDeath(mDeathRecipient);
    }


}
+
// Would lazily connect to the "media.audio_flinger" service (blocking and
// retrying until it is published) and register a death-notification
// client. The entire implementation is currently commented out, making
// this a no-op stub; the matching registerClient call in the constructor
// is commented out as well.
void TunnelPlayer::getAudioFlinger() {
/*    Mutex::Autolock _l(mAudioFlingerLock);

    if ( mAudioFlinger.get() == 0 ) {
        sp<IServiceManager> sm = defaultServiceManager();
        sp<IBinder> binder;
        do {
            binder = sm->getService(String16("media.audio_flinger"));
            if ( binder != 0 )
                break;
            ALOGW("AudioFlinger not published, waiting...");
            usleep(500000); // 0.5 s
        } while ( true );
        if ( mAudioFlingerClient == NULL ) {
            mAudioFlingerClient = new AudioFlingerTunnelDecodeClient(this);
        }

        binder->linkToDeath(mAudioFlingerClient);
        mAudioFlinger = interface_cast<IAudioFlinger>(binder);
    }
    ALOGE_IF(mAudioFlinger==0, "no AudioFlinger!?");*/
}
+
+/*TunnelPlayer::AudioFlingerTunnelDecodeClient::AudioFlingerTunnelDecodeClient(void *obj)
+{
+ ALOGD("TunnelPlayer::AudioFlingerTunnelDecodeClient - Constructor");
+ pBaseClass = (TunnelPlayer*)obj;
+}
+
+void TunnelPlayer::AudioFlingerTunnelDecodeClient::binderDied(const wp<IBinder>& who) {
+ Mutex::Autolock _l(pBaseClass->mAudioFlingerLock);
+
+ pBaseClass->mAudioFlinger.clear();
+ ALOGW("AudioFlinger server died!");
+}*/
+
+/*void TunnelPlayer::AudioFlingerTunnelDecodeClient::ioConfigChanged(int event, int ioHandle, void *param2) {
+ ALOGV("ioConfigChanged() event %d", event);
+
+
+ if (event != AudioSystem::A2DP_OUTPUT_STATE) {
+ return;
+ }
+
+ switch ( event ) {
+ case AudioSystem::A2DP_OUTPUT_STATE:
+ {
+ if ( -1 == ioHandle ) {
+ if ( pBaseClass->mIsA2DPEnabled ) {
+ pBaseClass->mIsA2DPEnabled = false;
+ if (pBaseClass->mStarted) {
+ pBaseClass->handleA2DPSwitch();
+ }
+ ALOGV("ioConfigChanged:: A2DP Disabled");
+ }
+ } else {
+ if ( !pBaseClass->mIsA2DPEnabled ) {
+ pBaseClass->mIsA2DPEnabled = true;
+ if (pBaseClass->mStarted) {
+ pBaseClass->handleA2DPSwitch();
+ }
+ ALOGV("ioConfigChanged:: A2DP Enabled");
+ }
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ ALOGV("ioConfigChanged Out");
+}*/
+
+/*void TunnelPlayer::handleA2DPSwitch() {
+ //TODO: Implement
+}
+*/
+
// Hands the compressed audio MediaSource to the player. May only be called
// once, before start(): the CHECK aborts if a source is already set.
void TunnelPlayer::setSource(const sp<MediaSource> &source) {
    CHECK(mSource == NULL);
    ALOGD("Setting source from Tunnel Player");
    mSource = source;
}
+
+status_t TunnelPlayer::start(bool sourceAlreadyStarted) {
+ CHECK(!mStarted);
+ CHECK(mSource != NULL);
+
+ ALOGV("start: sourceAlreadyStarted %d", sourceAlreadyStarted);
+ //Check if the source is started, start it
+ status_t err;
+ if (!sourceAlreadyStarted) {
+ err = mSource->start();
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ //Create decoder and a2dp notification thread and initialize all the
+ //mutexes and coditional variables
+ createThreads();
+ ALOGV("All Threads Created.");
+
+ // We allow an optional INFO_FORMAT_CHANGED at the very beginning
+ // of playback, if there is one, getFormat below will retrieve the
+ // updated format, if there isn't, we'll stash away the valid buffer
+ // of data to be used on the first audio callback.
+
+ CHECK(mFirstBuffer == NULL);
+
+ MediaSource::ReadOptions options;
+ if (mSeeking) {
+ options.setSeekTo(mSeekTimeUs);
+ mSeeking = false;
+ }
+
+ mFirstBufferResult = mSource->read(&mFirstBuffer, &options);
+ if (mFirstBufferResult == INFO_FORMAT_CHANGED) {
+ ALOGV("INFO_FORMAT_CHANGED!!!");
+ CHECK(mFirstBuffer == NULL);
+ mFirstBufferResult = OK;
+ mIsFirstBuffer = false;
+ } else {
+ mIsFirstBuffer = true;
+ }
+
+ sp<MetaData> format = mSource->getFormat();
+ const char *mime;
+ bool success = format->findCString(kKeyMIMEType, &mime);
+ if (!strcasecmp(mime,MEDIA_MIMETYPE_AUDIO_AAC)) {
+ mFormat = AUDIO_FORMAT_AAC;
+ }
+ if (!strcasecmp(mime,MEDIA_MIMETYPE_AUDIO_AMR_WB)) {
+ mFormat = AUDIO_FORMAT_AMR_WB;
+ ALOGV("TunnelPlayer::start AUDIO_FORMAT_AMR_WB");
+ }
+ if (!strcasecmp(mime,MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS)) {
+ mFormat = AUDIO_FORMAT_AMR_WB_PLUS;
+ ALOGV("TunnelPlayer::start AUDIO_FORMAT_AMR_WB_PLUS");
+ }
+
+ CHECK(success);
+
+ success = format->findInt32(kKeySampleRate, &mSampleRate);
+ CHECK(success);
+
+ success = format->findInt32(kKeyChannelCount, &numChannels);
+ CHECK(success);
+
+ if(!format->findInt32(kKeyChannelMask, &mChannelMask)) {
+ // log only when there's a risk of ambiguity of channel mask selection
+ ALOGI_IF(numChannels > 2,
+ "source format didn't specify channel mask, using (%d) channel order", numChannels);
+ mChannelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
+ }
+ audio_output_flags_t flags = (audio_output_flags_t) (AUDIO_OUTPUT_FLAG_TUNNEL |
+ AUDIO_OUTPUT_FLAG_DIRECT);
+ ALOGV("mAudiosink->open() mSampleRate %d, numChannels %d, mChannelMask %d, flags %d",mSampleRate, numChannels, mChannelMask, flags);
+ err = mAudioSink->open(
+ mSampleRate, numChannels, mChannelMask, mFormat,
+ DEFAULT_AUDIOSINK_BUFFERCOUNT,
+ &TunnelPlayer::AudioSinkCallback,
+ this,
+ (mA2DPEnabled ? AUDIO_OUTPUT_FLAG_NONE : flags));
+
+ if (err != OK) {
+ if (mFirstBuffer != NULL) {
+ mFirstBuffer->release();
+ mFirstBuffer = NULL;
+ }
+
+ if (!sourceAlreadyStarted) {
+ mSource->stop();
+ }
+
+ ALOGE("Opening a routing session failed");
+ return err;
+ }
+
+ if (!mIsA2DPEnabled) {
+ acquireWakeLock();
+ }
+
+ mIsAudioRouted = true;
+ mStarted = true;
+ mAudioSink->start();
+ ALOGV("Waking up extractor thread");
+ pthread_cond_signal(&extractor_cv);
+
+ return OK;
+}
+
+// Seek playback to the given media time (microseconds).
+// If the target lies within TUNNEL_BUFFER_TIME of the current rendered
+// position, the seek is treated as a no-op (data is assumed already
+// buffered) and only the seek-complete notification is posted.
+// The actual source seek is performed later by fillBuffer() on the
+// extractor thread, driven by the mSeeking/mSeekTimeUs flags set here.
+status_t TunnelPlayer::seekTo(int64_t time_us) {
+
+ ALOGV("seekTo: time_us %lld", time_us);
+
+ //This can happen if the client calls seek
+ //without ever calling getPosition
+ if (mPositionTimeRealUs == -1) {
+ getOffsetRealTime_l(&mPositionTimeRealUs);
+ }
+
+ if (mPositionTimeRealUs > 0) {
+ //check for return conditions only if seektime
+ // is set
+ if (time_us > mPositionTimeRealUs){
+ if((time_us - mPositionTimeRealUs) < TUNNEL_BUFFER_TIME){
+ ALOGV("In seekTo(), ignoring time_us %lld mSeekTimeUs %lld", time_us, mSeekTimeUs);
+ mObserver->postAudioSeekComplete();
+ return OK;
+ }
+ } else {
+ if((mPositionTimeRealUs - time_us) < TUNNEL_BUFFER_TIME){
+ ALOGV("In seekTo(), ignoring time_us %lld mSeekTimeUs %lld", time_us, mSeekTimeUs);
+ mObserver->postAudioSeekComplete();
+ return OK;
+ }
+ }
+ }
+
+ // Record the pending seek; fillBuffer() consumes these on the extractor
+ // thread. mPauseTime tracks the seek point so position queries while
+ // paused report the new position.
+ mSeeking = true;
+ mSeekTimeUs = time_us;
+ mPauseTime = mSeekTimeUs;
+ ALOGV("In seekTo(), mSeekTimeUs %lld",mSeekTimeUs);
+
+ if (mIsAudioRouted) {
+ // Drop any already-queued (now stale) audio from the sink.
+ mAudioSink->flush();
+ }
+
+ if (mReachedEOS) {
+ // Seeking back from EOS revives playback; wake the extractor thread
+ // only if we are not paused (resume() will wake it otherwise).
+ mReachedEOS = false;
+ mReachedOutputEOS = false;
+ if(mPaused == false) {
+ ALOGV("Going to signal extractor thread since playback is already going on ");
+ pthread_cond_signal(&extractor_cv);
+ ALOGV("Signalled extractor thread.");
+ }
+ }
+ ALOGV("seek done.");
+ return OK;
+}
+// Pause playback. Schedules a deferred-teardown event (onPauseTimeOut) so
+// the tunnel session is closed only if the pause outlasts
+// TUNNEL_PAUSE_TIMEOUT_USEC, and snapshots the DSP playback time so
+// position queries while paused return a stable value.
+// playPendingSamples is only logged here; the sink is always paused.
+void TunnelPlayer::pause(bool playPendingSamples) {
+ CHECK(mStarted);
+ // NOTE(review): mPaused is read before mLock is taken below -- confirm
+ // callers serialize pause()/resume(), otherwise this early-out races.
+ if (mPaused) {
+ return;
+ }
+ Mutex::Autolock autoLock(mLock);
+ ALOGV("pause: playPendingSamples %d", playPendingSamples);
+ mPaused = true;
+ int64_t playedTime = 0;
+ if(!mPauseEventPending) {
+ ALOGV("Posting an event for Pause timeout");
+ mQueue.postEventWithDelay(mPauseEvent, TUNNEL_PAUSE_TIMEOUT_USEC);
+ mPauseEventPending = true;
+ }
+ // Pause position = last seek time plus what the DSP has rendered since.
+ getPlayedTimeFromDSP_l(&playedTime);
+ mPauseTime = mSeekTimeUs + playedTime;
+ if (mAudioSink.get() != NULL) {
+ ALOGV("AudioSink pause");
+ mAudioSink->pause();
+ }
+}
+
+// Resume from pause. Cancels the pending pause-timeout event, re-opens the
+// tunnel sink if onPauseTimeOut already tore it down, restarts the sink and
+// wakes the extractor thread.
+void TunnelPlayer::resume() {
+ Mutex::Autolock autoLock(mLock);
+ ALOGV("resume: mPaused %d",mPaused);
+ if ( mPaused) {
+ CHECK(mStarted);
+ if (!mIsA2DPEnabled) {
+ if(mPauseEventPending) {
+ ALOGV("Resume(): Cancelling the puaseTimeout event");
+ mPauseEventPending = false;
+ mQueue.cancelEvent(mPauseEvent->eventID());
+ }
+
+ }
+ audio_format_t format;
+
+ if (!mIsAudioRouted) {
+ // Session was closed by onPauseTimeOut(); reopen with the same
+ // tunnel/direct flags and cached stream parameters.
+ audio_output_flags_t flags = (audio_output_flags_t) (AUDIO_OUTPUT_FLAG_TUNNEL |
+ AUDIO_OUTPUT_FLAG_DIRECT);
+ status_t err = mAudioSink->open(
+ mSampleRate, numChannels, mChannelMask, mFormat,
+ DEFAULT_AUDIOSINK_BUFFERCOUNT,
+ &TunnelPlayer::AudioSinkCallback,
+ this,
+ (mA2DPEnabled ? AUDIO_OUTPUT_FLAG_NONE : flags ));
+ if (err != NO_ERROR) {
+ ALOGE("Audio sink open failed.");
+ }
+ // NOTE(review): mIsAudioRouted is set true (and the sink started
+ // below) even if open() failed -- confirm this is intentional.
+ mIsAudioRouted = true;
+ }
+ mPaused = false;
+ ALOGV("Audio sink open succeeded.");
+ mAudioSink->start();
+ ALOGV("Audio sink start succeeded.");
+ pthread_cond_signal(&extractor_cv);
+ ALOGV("Audio signalling extractor thread.");
+ }
+}
+
+//static
+// AudioSink event callback. For tunnel playback no PCM data is pulled here;
+// the sink signals events instead: buffer == NULL with
+// size == AudioTrack::EVENT_UNDERRUN marks an underrun, which -- if the
+// extractor already hit input EOS -- means output EOS, so notify the
+// observer. A seek in flight resets mReachedEOS, so the notification is
+// suppressed in that case.
+size_t TunnelPlayer::AudioSinkCallback(
+ MediaPlayerBase::AudioSink *audioSink,
+ void *buffer, size_t size, void *cookie) {
+ if (buffer == NULL && size == AudioTrack::EVENT_UNDERRUN) {
+ TunnelPlayer *me = (TunnelPlayer *)cookie;
+ if(me->mReachedEOS == true) {
+ //in the case of seek all these flags will be reset
+ me->mReachedOutputEOS = true;
+ ALOGV("postAudioEOS mSeeking %d", me->mSeeking);
+ me->mObserver->postAudioEOS(0);
+ }else {
+ ALOGV("postAudioEOS ignored since %d", me->mSeeking);
+ }
+ }
+ return 1;
+}
+
+// Tear everything down: stop the extractor thread, close the audio sink,
+// release held media buffers, stop the source and clear all playback state
+// back to the just-constructed defaults.
+void TunnelPlayer::reset() {
+ ALOGV("Reset");
+
+ // Force the extractor loop's fillBuffer() to bail out early.
+ mReachedEOS = true;
+
+ // make sure Decoder thread has exited
+ requestAndWaitForExtractorThreadExit();
+
+ // Close the audiosink after all the threads exited to make sure
+ if (mIsAudioRouted) {
+ mAudioSink->stop();
+ mAudioSink->close();
+ mIsAudioRouted = false;
+ }
+ //TODO: Release Wake lock
+
+ // Make sure to release any buffer we hold onto so that the
+ // source is able to stop().
+ if (mFirstBuffer != NULL) {
+ mFirstBuffer->release();
+ mFirstBuffer = NULL;
+ }
+
+ if (mInputBuffer != NULL) {
+ ALOGV("AudioPlayer releasing input buffer.");
+ mInputBuffer->release();
+ mInputBuffer = NULL;
+ }
+
+ if (mStarted)
+ mSource->stop();
+
+ mSource.clear();
+
+ // Reset bookkeeping to initial values.
+ mPositionTimeMediaUs = -1;
+ mPositionTimeRealUs = -1;
+ mSeeking = false;
+ mReachedEOS = false;
+ mReachedOutputEOS = false;
+ mFinalStatus = OK;
+ mStarted = false;
+}
+
+
+// Returns whether a seek is currently pending (observed under mLock).
+bool TunnelPlayer::isSeeking() {
+ Mutex::Autolock autoLock(mLock);
+ return mSeeking;
+}
+
+// Reports whether output EOS has been reached; *finalStatus receives the
+// terminal status of the source read (OK until an error/EOS occurred).
+bool TunnelPlayer::reachedEOS(status_t *finalStatus) {
+ // NOTE(review): this first assignment is immediately overwritten under
+ // the lock below; it is redundant.
+ *finalStatus = OK;
+ Mutex::Autolock autoLock(mLock);
+ *finalStatus = mFinalStatus;
+ return mReachedOutputEOS;
+}
+
+
+// pthread trampoline: forwards to the instance's extractorThreadEntry().
+void *TunnelPlayer::extractorThreadWrapper(void *me) {
+ static_cast<TunnelPlayer *>(me)->extractorThreadEntry();
+ return NULL;
+}
+
+
+// Extractor thread main loop. Waits until start() signals, then repeatedly:
+// fills a local buffer from the source via fillBuffer(), and writes it to
+// the tunnel audio sink -- unless the player is paused, seeking, unrouted or
+// being torn down, in which case it sleeps on extractor_cv until signalled.
+// A zero-length write to the sink is used to convey EOS downstream.
+void TunnelPlayer::extractorThreadEntry() {
+
+ pthread_mutex_lock(&extractor_mutex);
+ uint32_t BufferSizeToUse = MEM_BUFFER_SIZE;
+
+ // Name the thread and raise it to audio priority.
+ pid_t tid = gettid();
+ androidSetThreadPriority(tid, ANDROID_PRIORITY_AUDIO);
+ prctl(PR_SET_NAME, (unsigned long)"Extractor Thread", 0, 0, 0);
+
+ ALOGV("extractorThreadEntry wait for signal \n");
+ if (!mStarted) {
+ // Block until start() (or teardown) signals extractor_cv.
+ pthread_cond_wait(&extractor_cv, &extractor_mutex);
+ }
+ ALOGV("extractorThreadEntry ready to work \n");
+ pthread_mutex_unlock(&extractor_mutex);
+ if (killExtractorThread) {
+ return;
+ }
+ // NOTE(review): this mime lookup is dead code -- 'success'/'mime' are
+ // never used after this block.
+ if(mSource != NULL) {
+ sp<MetaData> format = mSource->getFormat();
+ const char *mime;
+ bool success = format->findCString(kKeyMIMEType, &mime);
+ }
+ // Scratch buffer; the int slot at offset MEM_BUFFER_SIZE (inside the
+ // MEM_PADDING region) carries fillBuffer()'s "fresh seek" flag.
+ void* local_buf = malloc(BufferSizeToUse + MEM_PADDING);
+ int *lptr = ((int*)local_buf);
+ int bytesWritten = 0;
+ bool lSeeking = false;
+ bool lPaused = false;
+ while (!killExtractorThread) {
+
+ if (mReachedEOS || mPaused || !mIsAudioRouted) {
+ ALOGV("Going to sleep before write since "
+ "mReachedEOS %d, mPaused %d, mIsAudioRouted %d",
+ mReachedEOS, mPaused, mIsAudioRouted);
+ pthread_mutex_lock(&extractor_mutex);
+ pthread_cond_wait(&extractor_cv, &extractor_mutex);
+ pthread_mutex_unlock(&extractor_mutex);
+ ALOGV("Woke up from sleep before write since "
+ "mReachedEOS %d, mPaused %d, mIsAudioRouted %d",
+ mReachedEOS, mPaused, mIsAudioRouted);
+ continue;
+ }
+
+ if (!mIsA2DPEnabled) {
+ ALOGV("FillBuffer: MemBuffer size %d", BufferSizeToUse);
+ ALOGV("Fillbuffer started");
+ bytesWritten = fillBuffer(local_buf, BufferSizeToUse);
+ ALOGV("FillBuffer completed bytesToWrite %d", bytesWritten);
+ if(!killExtractorThread) {
+ mLock.lock();
+ lPaused = mPaused;
+ mLock.unlock();
+
+ if(lPaused == true) {
+ //write only if player is not in paused state. Sleep on lock
+ // until resume is called
+ ALOGV("Going to sleep in decodethreadiwrite since sink is paused");
+ pthread_mutex_lock(&extractor_mutex);
+ pthread_cond_wait(&extractor_cv, &extractor_mutex);
+ ALOGV("Going to unlock n decodethreadwrite since sink "
+ "resumed mPaused %d, mIsAudioRouted %d, mReachedEOS %d",
+ mPaused, mIsAudioRouted, mReachedEOS);
+ pthread_mutex_unlock(&extractor_mutex);
+ }
+ // Re-sample the seek state after any pause-sleep above.
+ mLock.lock();
+ lSeeking = mSeeking||mInternalSeeking;
+ mLock.unlock();
+
+ if(lSeeking == false && (killExtractorThread == false)){
+ //if we are seeking, ignore write, otherwise write
+ ALOGV("Fillbuffer before write %d and seek flag %d", mSeeking,
+ lptr[MEM_BUFFER_SIZE/sizeof(int)]);
+ int lWrittenBytes = mAudioSink->write(local_buf, bytesWritten);
+ ALOGV("Fillbuffer after write, written bytes %d and seek flag %d", lWrittenBytes, mSeeking);
+ if(lWrittenBytes > 0) {
+ //send EOS only if write was successful, if is_buffer_available
+ // is flushed out (which returns 0 do not SEND EOS
+ ALOGV("Fillbuffer after write and seek flag %d", mSeeking);
+ mLock.lock();
+ lSeeking = mSeeking||mInternalSeeking;
+ mLock.unlock();
+ //ignore posting zero length buffer is seeking is set
+ if(mReachedEOS && bytesWritten && !lSeeking && (killExtractorThread == false)) {
+ ALOGV("Fillbuffer after write sent EOS flag %d", lSeeking);
+ // Zero-length write signals EOS to the sink.
+ mAudioSink->write(local_buf, 0);
+ } else {
+ ALOGV("Not sending EOS buffer sent since seeking %d, "
+ "kill %d and mReachedEOS %d", \
+ lSeeking, killExtractorThread, mReachedEOS);
+ }
+ } else {
+ ALOGV("write exited because of flush %d", mSeeking);
+ }
+ } else {
+ ALOGV("Fillbuffer ignored since we seeked after fillBuffer was set %d", mSeeking);
+ }
+ }
+ }
+ }
+
+ free(local_buf);
+
+ //TODO: Call fillbuffer with different size and write to mAudioSink()
+}
+// Initialize the extractor-thread synchronization primitives and spawn the
+// (joinable) extractor thread. Called once during player setup.
+void TunnelPlayer::createThreads() {
+
+ //Initialize all the Mutexes and Condition Variables
+ pthread_mutex_init(&extractor_mutex, NULL);
+ pthread_cond_init (&extractor_cv, NULL);
+
+ // Create 4 threads Effect, decoder, event and A2dp
+ pthread_attr_t attr;
+ pthread_attr_init(&attr);
+ // Joinable so requestAndWaitForExtractorThreadExit() can pthread_join it.
+ pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+
+ killExtractorThread = false;
+
+ extractorThreadAlive = true;
+
+ ALOGV("Creating Extractor Thread");
+ pthread_create(&extractorThread, &attr, extractorThreadWrapper, this);
+
+ pthread_attr_destroy(&attr);
+}
+// Fill 'data' with up to 'size' bytes of compressed audio from the source.
+// Handles pending seeks (mSeeking / mInternalSeeking): on a seek the current
+// input buffer and any partially filled output are discarded, the source is
+// re-read from mSeekTimeUs, and the int slot at data[MEM_BUFFER_SIZE] is set
+// to 1 so the writer knows this buffer starts at the new position.
+// On source error/EOS sets mReachedEOS/mFinalStatus, zero-pads the tail,
+// and returns the bytes actually produced.
+size_t TunnelPlayer::fillBuffer(void *data, size_t size) {
+
+ if (mReachedEOS) {
+ return 0;
+ }
+
+ bool postSeekComplete = false;
+
+ size_t size_done = 0;
+ size_t size_remaining = size;
+ int *ldataptr = (int*) data;
+ //clear the flag since we dont know whether we are seeking or not, yet
+ ldataptr[(MEM_BUFFER_SIZE/sizeof(int))] = 0;
+ ALOGV("fillBuffer: Clearing seek flag in fill buffer");
+
+ while (size_remaining > 0) {
+ MediaSource::ReadOptions options;
+ {
+ Mutex::Autolock autoLock(mLock);
+ // A user seek supersedes any internal (pause-timeout) seek.
+ if(mSeeking) {
+ mInternalSeeking = false;
+ }
+
+ if (mSeeking || mInternalSeeking) {
+ // Drop the stashed first buffer from start(); it predates the seek.
+ if (mIsFirstBuffer) {
+ if (mFirstBuffer != NULL) {
+ mFirstBuffer->release();
+ mFirstBuffer = NULL;
+ }
+ mIsFirstBuffer = false;
+ }
+
+ MediaSource::ReadOptions::SeekMode seekMode;
+ seekMode = MediaSource::ReadOptions::SEEK_CLOSEST_SYNC;
+ options.setSeekTo(mSeekTimeUs, seekMode );
+ if (mInputBuffer != NULL) {
+ mInputBuffer->release();
+ mInputBuffer = NULL;
+ }
+
+ // This is to ignore the data already filled in the output buffer
+ size_done = 0;
+ size_remaining = size;
+
+ mSeeking = false;
+ // Only user-initiated seeks are reported to the observer.
+ if (mObserver && !mInternalSeeking) {
+ ALOGV("fillBuffer: Posting audio seek complete event");
+ postSeekComplete = true;
+ }
+ mInternalSeeking = false;
+ ALOGV("fillBuffer: Setting seek flag in fill buffer");
+ //set the flag since we know that this buffer is the new positions buffer
+ ldataptr[(MEM_BUFFER_SIZE/sizeof(int))] = 1;
+ }
+ }
+ if (mInputBuffer == NULL) {
+ status_t err;
+
+ if (mIsFirstBuffer) {
+ // Consume the buffer pre-fetched by start().
+ mInputBuffer = mFirstBuffer;
+ mFirstBuffer = NULL;
+ err = mFirstBufferResult;
+
+ mIsFirstBuffer = false;
+ } else {
+ err = mSource->read(&mInputBuffer, &options);
+ }
+
+ CHECK((err == OK && mInputBuffer != NULL)
+ || (err != OK && mInputBuffer == NULL));
+ {
+ Mutex::Autolock autoLock(mLock);
+
+ if (err != OK) {
+ ALOGD("fill buffer - reached eos true");
+ mReachedEOS = true;
+ mFinalStatus = err;
+ break;
+ }
+ }
+
+ }
+ if (mInputBuffer->range_length() == 0) {
+ mInputBuffer->release();
+ mInputBuffer = NULL;
+ continue;
+ }
+
+ // Copy as much of the input buffer as fits; leftovers stay in
+ // mInputBuffer for the next call.
+ size_t copy = size_remaining;
+ if (copy > mInputBuffer->range_length()) {
+ copy = mInputBuffer->range_length();
+ }
+ memcpy((char *)data + size_done,
+ (const char *)mInputBuffer->data() + mInputBuffer->range_offset(),
+ copy);
+
+ mInputBuffer->set_range(mInputBuffer->range_offset() + copy,
+ mInputBuffer->range_length() - copy);
+
+ size_done += copy;
+ size_remaining -= copy;
+ }
+ // Pad the unfilled tail with silence once EOS is hit.
+ if(mReachedEOS)
+ memset((char *)data + size_done, 0x0, size_remaining);
+ ALOGV("fill buffer size_done = %d",size_done);
+
+ // Posted outside mLock to avoid re-entrancy into the observer under lock.
+ if (postSeekComplete) {
+ mObserver->postAudioSeekComplete();
+ }
+
+ return size_done;
+}
+
+// Current playback position in real time (us): pause/seek-adjusted DSP time.
+// Tunnel playback has a single clock, so media time mirrors real time.
+int64_t TunnelPlayer::getRealTimeUs() {
+ Mutex::Autolock autoLock(mLock);
+ getOffsetRealTime_l(&mPositionTimeRealUs);
+ //update media time too
+ mPositionTimeMediaUs = mPositionTimeRealUs;
+ return mPositionTimeRealUs;
+}
+
+// Query the raw rendered-time counter from the DSP via the audio sink.
+// Caller must hold mLock (per the _l suffix convention).
+void TunnelPlayer::getPlayedTimeFromDSP_l(int64_t* timeStamp ) {
+ ALOGV("going to query timestamp");
+ mAudioSink->getTimeStamp((uint64_t*)timeStamp);
+ ALOGV("timestamp returned from DSP %lld ", (*timeStamp));
+ return;
+}
+
+//offset with pause and seek time
+// While paused, report the frozen mPauseTime; otherwise DSP time offset by
+// the last seek position. Caller must hold mLock.
+void TunnelPlayer::getOffsetRealTime_l(int64_t* offsetTime) {
+ if (mPaused) {
+ *offsetTime = mPauseTime;
+ ALOGV("getMediaTimeUs() mPaused %d mSeekTimeUs %lld mPauseTime %lld", mPaused, mSeekTimeUs, mPauseTime );
+ } else {
+ getPlayedTimeFromDSP_l(offsetTime);
+ ALOGV("getMediaTimeUs() mPaused %d mSeekTimeUs %lld mPauseTime %lld, timeStamp %lld", mPaused, mSeekTimeUs, mPauseTime, *offsetTime);
+ *offsetTime = mSeekTimeUs + *offsetTime;
+ }
+}
+
+int64_t TunnelPlayer::getMediaTimeUs() {
+ //essentially there is only one time, the real time
+ return getRealTimeUs();
+}
+
+// Expose the cached real/media time pair; returns false until a position
+// query has populated them (both start at -1).
+bool TunnelPlayer::getMediaTimeMapping(
+ int64_t *realtime_us, int64_t *mediatime_us) {
+ Mutex::Autolock autoLock(mLock);
+
+ *realtime_us = mPositionTimeRealUs;
+ *mediatime_us = mPositionTimeMediaUs;
+
+ return mPositionTimeRealUs != -1 && mPositionTimeMediaUs != -1;
+}
+
+// Ask the extractor thread to exit and join it. The sink flush unblocks a
+// write() in progress; the condvar signal unblocks a sleeping thread.
+void TunnelPlayer::requestAndWaitForExtractorThreadExit() {
+
+ if (!extractorThreadAlive)
+ return;
+
+ killExtractorThread = true;
+
+ ALOGV("requestAndWaitForExtractorThreadExit +0");
+ if (mIsAudioRouted && !mReachedOutputEOS) {
+ mAudioSink->flush();
+ }
+
+ ALOGV("requestAndWaitForExtractorThreadExit +1");
+ pthread_cond_signal(&extractor_cv);
+ ALOGV("requestAndWaitForExtractorThreadExit +2");
+ pthread_join(extractorThread,NULL);
+ ALOGV("requestAndWaitForExtractorThreadExit +3");
+
+ ALOGV("Extractor thread killed");
+}
+
+// Fired TUNNEL_PAUSE_TIMEOUT_USEC after pause() if playback has not
+// resumed: tears down the tunnel session to save power. Records an
+// internal seek to the current DSP position so resume() can reopen the
+// sink and continue from where playback stopped.
+void TunnelPlayer::onPauseTimeOut() {
+ Mutex::Autolock autoLock(mLock);
+ int64_t playedTime = 0;
+ ALOGV("onPauseTimeOut");
+ if (!mPauseEventPending) {
+ // resume() already cancelled this event; nothing to do.
+ return;
+ }
+ mPauseEventPending = false;
+ if(!mIsA2DPEnabled) {
+ // 1.) Set seek flags
+ mReachedEOS = false;
+ mReachedOutputEOS = false;
+
+ if(mSeeking == false) {
+ ALOGV("onPauseTimeOut +2");
+ mInternalSeeking = true;
+ ALOGV("onPauseTimeOut +3");
+ getPlayedTimeFromDSP_l(&playedTime);
+ mSeekTimeUs += playedTime;
+ } else {
+ ALOGV("Do not update seek time if it was seeked before onpause timeout");
+ }
+
+ // 2.) Close routing Session
+ ALOGV("onPauseTimeOut +4");
+ mAudioSink->flush();
+ ALOGV("onPauseTimeOut +5");
+ mAudioSink->stop();
+ ALOGV("onPauseTimeOut +6");
+ mAudioSink->close();
+ ALOGV("onPauseTimeOut +7");
+ mIsAudioRouted = false;
+
+ // 3.) Release Wake Lock
+ releaseWakeLock();
+ }
+
+}
+
+} //namespace android
diff --git a/media/libstagefright/WAVEWriter.cpp b/media/libstagefright/WAVEWriter.cpp
new file mode 100644
index 0000000..9700fa7
--- /dev/null
+++ b/media/libstagefright/WAVEWriter.cpp
@@ -0,0 +1,323 @@
+/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "WAVEWriter"
+#include <utils/Log.h>
+
+#include <media/stagefright/WAVEWriter.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MetaData.h>
+#include <media/mediarecorder.h>
+#include <sys/prctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+namespace android {
+
+static struct wav_header hdr;
+
+
+// Open (create/truncate) the output file by path; mInitCheck stays NO_INIT
+// if the open fails.
+WAVEWriter::WAVEWriter(const char *filename)
+ : mFd(-1),
+ mInitCheck(NO_INIT),
+ mStarted(false),
+ mPaused(false),
+ mResumed(false) {
+
+ mFd = open(filename, O_CREAT | O_LARGEFILE | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR);
+ if (mFd >= 0) {
+ mInitCheck = OK;
+ }
+}
+
+// Adopt a caller-supplied descriptor; dup() so the caller keeps ownership
+// of its own fd.
+WAVEWriter::WAVEWriter(int fd)
+ : mFd(dup(fd)),
+ mInitCheck(mFd < 0? NO_INIT: OK),
+ mStarted(false),
+ mPaused(false),
+ mResumed(false) {
+}
+
+// Stop the writer thread if still running and close the file.
+WAVEWriter::~WAVEWriter() {
+ if (mStarted) {
+ stop();
+ }
+
+ if (mFd != -1) {
+ close(mFd);
+ mFd = -1;
+ }
+}
+
+// Whether construction succeeded (output fd usable).
+status_t WAVEWriter::initCheck() const {
+ return mInitCheck;
+}
+
+// Attach the single PCM source and write a placeholder 44-byte WAV header
+// (data_sz/riff_sz are patched at the end of threadFunc()). Fails if a
+// source is already set. Note: 'hdr' is a file-scope static shared by all
+// WAVEWriter instances.
+status_t WAVEWriter::addSource(const sp<MediaSource> &source) {
+ // NOTE(review): 'count' is unused.
+ uint32_t count;
+ if (mInitCheck != OK) {
+ ALOGE("Init Check not OK, return");
+ return mInitCheck;
+ }
+
+ if (mSource != NULL) {
+ ALOGE("A source already exists, return");
+ return UNKNOWN_ERROR;
+ }
+
+ sp<MetaData> meta = source->getFormat();
+
+ const char *mime;
+ CHECK(meta->findCString(kKeyMIMEType, &mime));
+
+ int32_t channelCount;
+ int32_t sampleRate;
+ CHECK(meta->findInt32(kKeyChannelCount, &channelCount));
+ CHECK(meta->findInt32(kKeySampleRate, &sampleRate));
+
+ // Canonical 16-bit PCM RIFF/WAVE header; sizes filled in later.
+ memset(&hdr, 0, sizeof(struct wav_header));
+ hdr.riff_id = ID_RIFF;
+ hdr.riff_fmt = ID_WAVE;
+ hdr.fmt_id = ID_FMT;
+ hdr.fmt_sz = 16;
+ hdr.audio_format = FORMAT_PCM;
+ hdr.num_channels = channelCount;
+ hdr.sample_rate = sampleRate;
+ hdr.bits_per_sample = 16;
+ hdr.byte_rate = (sampleRate * channelCount * hdr.bits_per_sample) / 8;
+ hdr.block_align = ( hdr.bits_per_sample * channelCount ) / 8;
+ hdr.data_id = ID_DATA;
+ hdr.data_sz = 0;
+ hdr.riff_sz = hdr.data_sz + 44 - 8;
+
+ if (write(mFd, &hdr, sizeof(hdr)) != sizeof(hdr)) {
+ ALOGE("Write header error, return ERROR_IO");
+ // NOTE(review): ERROR_IO is typically already negative; '-ERROR_IO'
+ // may flip the sign -- confirm intended value.
+ return -ERROR_IO;
+ }
+
+ mSource = source;
+
+ return OK;
+}
+
+// Start recording: starts the source and spawns the writer thread.
+// If called while paused, acts as resume (sets mResumed so threadFunc can
+// account for the paused gap). Calling when already started is a no-op.
+status_t WAVEWriter::start(MetaData *params) {
+ if (mInitCheck != OK) {
+ ALOGE("Init Check not OK, return");
+ return mInitCheck;
+ }
+
+ if (mSource == NULL) {
+ ALOGE("NULL Source");
+ return UNKNOWN_ERROR;
+ }
+
+ if (mStarted && mPaused) {
+ mPaused = false;
+ mResumed = true;
+ return OK;
+ } else if (mStarted) {
+ ALOGE("Already startd, return");
+ return OK;
+ }
+
+ status_t err = mSource->start();
+
+ if (err != OK) {
+ return err;
+ }
+
+ pthread_attr_t attr;
+ pthread_attr_init(&attr);
+ // Joinable so stop() can pthread_join and collect the thread's status.
+ pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+
+ mReachedEOS = false;
+ mDone = false;
+
+ pthread_create(&mThread, &attr, ThreadWrapper, this);
+ pthread_attr_destroy(&attr);
+
+ mStarted = true;
+
+ return OK;
+}
+
+// Pause recording; threadFunc keeps reading but discards buffers while
+// mPaused is set.
+status_t WAVEWriter::pause() {
+ if (!mStarted) {
+ return OK;
+ }
+ mPaused = true;
+ return OK;
+}
+
+// Stop recording: signal the writer thread via mDone, join it, then stop
+// the source. The thread's return status takes precedence; a source-stop
+// error (other than normal end-of-stream) is reported if the thread was OK.
+status_t WAVEWriter::stop() {
+ if (!mStarted) {
+ return OK;
+ }
+
+ mDone = true;
+
+ void *dummy;
+ pthread_join(mThread, &dummy);
+
+ // threadFunc's status_t was smuggled through the pthread return value.
+ status_t err = (status_t) dummy;
+ {
+ status_t status = mSource->stop();
+ if (err == OK &&
+ (status != OK && status != ERROR_END_OF_STREAM)) {
+ err = status;
+ }
+ }
+
+ mStarted = false;
+ return err;
+}
+
+// True once the estimated output size reaches the configured cap
+// (0 means unlimited).
+bool WAVEWriter::exceedsFileSizeLimit() {
+ if (mMaxFileSizeLimitBytes == 0) {
+ return false;
+ }
+ return mEstimatedSizeBytes >= mMaxFileSizeLimitBytes;
+}
+
+// True once the estimated duration reaches the configured cap
+// (0 means unlimited).
+bool WAVEWriter::exceedsFileDurationLimit() {
+ if (mMaxFileDurationLimitUs == 0) {
+ return false;
+ }
+ return mEstimatedDurationUs >= mMaxFileDurationLimitUs;
+}
+
+// static
+// pthread trampoline: runs threadFunc() and returns its status as void*.
+void *WAVEWriter::ThreadWrapper(void *me) {
+ return (void *) static_cast<WAVEWriter *>(me)->threadFunc();
+}
+
+// Writer thread body: pulls buffers from the source and appends the raw
+// PCM payload to the file, enforcing file-size/duration limits and
+// compensating timestamps across pause/resume. On exit, rewinds and
+// rewrites the WAV header with the final data/riff sizes, then closes the
+// file. Returns OK on normal end-of-stream, else the failing status.
+status_t WAVEWriter::threadFunc() {
+ mEstimatedDurationUs = 0;
+ mEstimatedSizeBytes = 0;
+ bool stoppedPrematurely = true;
+ int64_t previousPausedDurationUs = 0;
+ int64_t maxTimestampUs = 0;
+ status_t err = OK;
+
+ prctl(PR_SET_NAME, (unsigned long)"WAVEWriter", 0, 0, 0);
+ hdr.data_sz = 0;
+ while (!mDone) {
+ MediaBuffer *buffer;
+ err = mSource->read(&buffer);
+
+ if (err != OK) {
+ break;
+ }
+
+ // While paused, keep draining the source but drop the data.
+ if (mPaused) {
+ buffer->release();
+ buffer = NULL;
+ continue;
+ }
+
+ mEstimatedSizeBytes += buffer->range_length();
+ if (exceedsFileSizeLimit()) {
+ buffer->release();
+ buffer = NULL;
+ notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_MAX_FILESIZE_REACHED, 0);
+ break;
+ }
+
+ int64_t timestampUs;
+ CHECK(buffer->meta_data()->findInt64(kKeyTime, &timestampUs));
+ if (timestampUs > mEstimatedDurationUs) {
+ mEstimatedDurationUs = timestampUs;
+ }
+ // On resume, fold the paused gap out of subsequent timestamps.
+ // NOTE(review): the 20000us constant looks like an assumed 20ms
+ // inter-buffer interval -- confirm against the source's buffer size.
+ if (mResumed) {
+ previousPausedDurationUs += (timestampUs - maxTimestampUs - 20000);
+ mResumed = false;
+ }
+ timestampUs -= previousPausedDurationUs;
+ ALOGV("time stamp: %lld, previous paused duration: %lld",
+ timestampUs, previousPausedDurationUs);
+ if (timestampUs > maxTimestampUs) {
+ maxTimestampUs = timestampUs;
+ }
+
+ if (exceedsFileDurationLimit()) {
+ buffer->release();
+ buffer = NULL;
+ notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_MAX_DURATION_REACHED, 0);
+ break;
+ }
+ ssize_t n = write(mFd,
+ (const uint8_t *)buffer->data() + buffer->range_offset(),
+ buffer->range_length());
+
+ hdr.data_sz += (ssize_t)buffer->range_length();
+ hdr.riff_sz = hdr.data_sz + 44 - 8;
+
+ // Short write: treat as fatal and stop.
+ if (n < (ssize_t)buffer->range_length()) {
+ buffer->release();
+ buffer = NULL;
+
+ break;
+ }
+
+ if (stoppedPrematurely) {
+ stoppedPrematurely = false;
+ }
+
+ buffer->release();
+ buffer = NULL;
+ }
+
+ // Not a single buffer was written -- report the track as incomplete.
+ if (stoppedPrematurely) {
+ notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_TRACK_INFO_COMPLETION_STATUS, UNKNOWN_ERROR);
+ }
+
+ // Patch the header in place with the final sizes.
+ lseek(mFd, 0, SEEK_SET);
+ write(mFd, &hdr, sizeof(hdr));
+ lseek(mFd, 0, SEEK_END);
+
+ close(mFd);
+ mFd = -1;
+ mReachedEOS = true;
+ if (err == ERROR_END_OF_STREAM) {
+ return OK;
+ }
+ return err;
+}
+
+// True once the writer thread has finished (normally or on error).
+bool WAVEWriter::reachedEOS() {
+ return mReachedEOS;
+}
+
+} // namespace android
diff --git a/media/libstagefright/WVMExtractor.cpp b/media/libstagefright/WVMExtractor.cpp
index 31b2bcf..5ae80cc 100644
--- a/media/libstagefright/WVMExtractor.cpp
+++ b/media/libstagefright/WVMExtractor.cpp
@@ -72,15 +72,18 @@ WVMExtractor::WVMExtractor(const sp<DataSource> &source)
}
}
-bool WVMExtractor::getVendorLibHandle()
+static void init_routine()
{
- if (gVendorLibHandle == NULL) {
- gVendorLibHandle = dlopen("libwvm.so", RTLD_NOW);
- }
-
+ gVendorLibHandle = dlopen("libwvm.so", RTLD_NOW);
if (gVendorLibHandle == NULL) {
ALOGE("Failed to open libwvm.so");
}
+}
+
+bool WVMExtractor::getVendorLibHandle()
+{
+ static pthread_once_t sOnceControl = PTHREAD_ONCE_INIT;
+ pthread_once(&sOnceControl, init_routine);
return gVendorLibHandle != NULL;
}
diff --git a/media/libstagefright/codecs/mp3dec/Android.mk b/media/libstagefright/codecs/mp3dec/Android.mk
index ec8d7ec..4837664 100644
--- a/media/libstagefright/codecs/mp3dec/Android.mk
+++ b/media/libstagefright/codecs/mp3dec/Android.mk
@@ -2,6 +2,7 @@ LOCAL_PATH:= $(call my-dir)
include $(CLEAR_VARS)
LOCAL_SRC_FILES := \
+ MP3Decoder.cpp \
src/pvmp3_normalize.cpp \
src/pvmp3_alias_reduction.cpp \
src/pvmp3_crc.cpp \
@@ -52,6 +53,64 @@ LOCAL_CFLAGS := \
LOCAL_MODULE := libstagefright_mp3dec
+include $(BUILD_STATIC_LIBRARY)
+
+
+
+#LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+ src/pvmp3_normalize.cpp \
+ src/pvmp3_alias_reduction.cpp \
+ src/pvmp3_crc.cpp \
+ src/pvmp3_decode_header.cpp \
+ src/pvmp3_decode_huff_cw.cpp \
+ src/pvmp3_getbits.cpp \
+ src/pvmp3_dequantize_sample.cpp \
+ src/pvmp3_framedecoder.cpp \
+ src/pvmp3_get_main_data_size.cpp \
+ src/pvmp3_get_side_info.cpp \
+ src/pvmp3_get_scale_factors.cpp \
+ src/pvmp3_mpeg2_get_scale_data.cpp \
+ src/pvmp3_mpeg2_get_scale_factors.cpp \
+ src/pvmp3_mpeg2_stereo_proc.cpp \
+ src/pvmp3_huffman_decoding.cpp \
+ src/pvmp3_huffman_parsing.cpp \
+ src/pvmp3_tables.cpp \
+ src/pvmp3_imdct_synth.cpp \
+ src/pvmp3_mdct_6.cpp \
+ src/pvmp3_dct_6.cpp \
+ src/pvmp3_poly_phase_synthesis.cpp \
+ src/pvmp3_equalizer.cpp \
+ src/pvmp3_seek_synch.cpp \
+ src/pvmp3_stereo_proc.cpp \
+ src/pvmp3_reorder.cpp \
+
+ifeq ($(TARGET_ARCH),arm)
+LOCAL_SRC_FILES += \
+ src/asm/pvmp3_polyphase_filter_window_gcc.s \
+ src/asm/pvmp3_mdct_18_gcc.s \
+ src/asm/pvmp3_dct_9_gcc.s \
+ src/asm/pvmp3_dct_16_gcc.s
+else
+LOCAL_SRC_FILES += \
+ src/pvmp3_polyphase_filter_window.cpp \
+ src/pvmp3_mdct_18.cpp \
+ src/pvmp3_dct_9.cpp \
+ src/pvmp3_dct_16.cpp
+endif
+
+LOCAL_C_INCLUDES := \
+ frameworks/av/media/libstagefright/include \
+ $(LOCAL_PATH)/src \
+ $(LOCAL_PATH)/include
+
+LOCAL_CFLAGS := \
+ -DOSCL_UNUSED_ARG=
+
+LOCAL_MODULE := libstagefright_mp3dec_omx
+
LOCAL_ARM_MODE := arm
include $(BUILD_STATIC_LIBRARY)
@@ -73,7 +132,7 @@ LOCAL_SHARED_LIBRARIES := \
libstagefright libstagefright_omx libstagefright_foundation libutils
LOCAL_STATIC_LIBRARIES := \
- libstagefright_mp3dec
+ libstagefright_mp3dec_omx
LOCAL_MODULE := libstagefright_soft_mp3dec
LOCAL_MODULE_TAGS := optional
diff --git a/media/libstagefright/codecs/mp3dec/MP3Decoder.cpp b/media/libstagefright/codecs/mp3dec/MP3Decoder.cpp
new file mode 100644
index 0000000..c24aca0
--- /dev/null
+++ b/media/libstagefright/codecs/mp3dec/MP3Decoder.cpp
@@ -0,0 +1,586 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MP3Decoder.h"
+
+#include "include/pvmp3decoder_api.h"
+
+#include <media/stagefright/MediaBufferGroup.h>
+#include <media/stagefright/MediaDebug.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
+
+namespace android {
+
+// Everything must match except for
+// protection, bitrate, padding, private bits, mode extension,
+// copyright bit, original bit and emphasis.
+// Yes ... there are things that must indeed match...
+static const uint32_t kMask = 0xfffe0cc0;
+
+// Parse a 32-bit MPEG audio frame header. Returns false if the header is
+// invalid (bad sync word, reserved version/layer/bitrate/samplerate
+// fields). On success *frame_size receives the frame length in bytes and,
+// if requested, sampling rate (Hz), channel count, and bitrate (kbps) are
+// reported through the optional out-parameters (zeroed on entry).
+static bool get_mp3_frame_size(
+ uint32_t header, size_t *frame_size,
+ int *out_sampling_rate = NULL, int *out_channels = NULL,
+ int *out_bitrate = NULL) {
+ *frame_size = 0;
+
+ if (out_sampling_rate) {
+ *out_sampling_rate = 0;
+ }
+
+ if (out_channels) {
+ *out_channels = 0;
+ }
+
+ if (out_bitrate) {
+ *out_bitrate = 0;
+ }
+
+ // Top 11 bits must all be set (frame sync).
+ if ((header & 0xffe00000) != 0xffe00000) {
+ return false;
+ }
+
+ // Version field: 3 = MPEG-1, 2 = MPEG-2, 0 = MPEG-2.5, 1 = reserved.
+ unsigned version = (header >> 19) & 3;
+
+ if (version == 0x01) {
+ return false;
+ }
+
+ // Layer field: 3 = Layer I, 2 = Layer II, 1 = Layer III, 0 = reserved.
+ unsigned layer = (header >> 17) & 3;
+
+ if (layer == 0x00) {
+ return false;
+ }
+
+ unsigned protection = (header >> 16) & 1;
+
+ unsigned bitrate_index = (header >> 12) & 0x0f;
+
+ if (bitrate_index == 0 || bitrate_index == 0x0f) {
+ // Disallow "free" bitrate.
+ return false;
+ }
+
+ unsigned sampling_rate_index = (header >> 10) & 3;
+
+ if (sampling_rate_index == 3) {
+ return false;
+ }
+
+ // Base rates are MPEG-1; V2 halves them, V2.5 quarters them.
+ static const int kSamplingRateV1[] = { 44100, 48000, 32000 };
+ int sampling_rate = kSamplingRateV1[sampling_rate_index];
+ if (version == 2 /* V2 */) {
+ sampling_rate /= 2;
+ } else if (version == 0 /* V2.5 */) {
+ sampling_rate /= 4;
+ }
+
+ unsigned padding = (header >> 9) & 1;
+
+ if (layer == 3) {
+ // layer I
+
+ static const int kBitrateV1[] = {
+ 32, 64, 96, 128, 160, 192, 224, 256,
+ 288, 320, 352, 384, 416, 448
+ };
+
+ static const int kBitrateV2[] = {
+ 32, 48, 56, 64, 80, 96, 112, 128,
+ 144, 160, 176, 192, 224, 256
+ };
+
+ int bitrate =
+ (version == 3 /* V1 */)
+ ? kBitrateV1[bitrate_index - 1]
+ : kBitrateV2[bitrate_index - 1];
+
+ if (out_bitrate) {
+ *out_bitrate = bitrate;
+ }
+
+ // Layer I frames: 12 * bitrate/samplerate slots, 4 bytes per slot.
+ *frame_size = (12000 * bitrate / sampling_rate + padding) * 4;
+ } else {
+ // layer II or III
+
+ static const int kBitrateV1L2[] = {
+ 32, 48, 56, 64, 80, 96, 112, 128,
+ 160, 192, 224, 256, 320, 384
+ };
+
+ static const int kBitrateV1L3[] = {
+ 32, 40, 48, 56, 64, 80, 96, 112,
+ 128, 160, 192, 224, 256, 320
+ };
+
+ static const int kBitrateV2[] = {
+ 8, 16, 24, 32, 40, 48, 56, 64,
+ 80, 96, 112, 128, 144, 160
+ };
+
+ int bitrate;
+ if (version == 3 /* V1 */) {
+ bitrate = (layer == 2 /* L2 */)
+ ? kBitrateV1L2[bitrate_index - 1]
+ : kBitrateV1L3[bitrate_index - 1];
+ } else {
+ // V2 (or 2.5)
+
+ bitrate = kBitrateV2[bitrate_index - 1];
+ }
+
+ if (out_bitrate) {
+ *out_bitrate = bitrate;
+ }
+
+ if (version == 3 /* V1 */) {
+ *frame_size = 144000 * bitrate / sampling_rate + padding;
+ } else {
+ // V2 or V2.5
+ *frame_size = 72000 * bitrate / sampling_rate + padding;
+ }
+ }
+
+ if (out_sampling_rate) {
+ *out_sampling_rate = sampling_rate;
+ }
+
+ if (out_channels) {
+ // Channel mode 3 = mono; everything else is 2-channel.
+ int channel_mode = (header >> 6) & 3;
+
+ *out_channels = (channel_mode == 3) ? 1 : 2;
+ }
+
+ return true;
+}
+
+// Scan 'data' for the next byte offset whose 4-byte word matches
+// match_header under kMask (or any sync word if match_header is 0).
+// On success *out_pos is the offset and true is returned; false if the
+// buffer is exhausted without a match.
+// NOTE(review): despite the "sequence of frames" log text, only a single
+// header match is checked -- the loop exits on the first candidate without
+// validating subsequent frames.
+static bool resync(
+ uint8_t *data, uint32_t size, uint32_t match_header, off_t *out_pos) {
+
+ bool valid = false;
+ off_t pos = 0;
+ *out_pos = 0;
+ do {
+ if (pos + 4 > size) {
+ // Don't scan forever.
+ ALOGV("no dice, no valid sequence of frames found.");
+ break;
+ }
+
+ uint32_t header = U32_AT(data + pos);
+
+ if (match_header != 0 && (header & kMask) != (match_header & kMask)) {
+ ++pos;
+ continue;
+ }
+
+ ALOGV("found possible frame at %ld (header = 0x%08x)", pos, header);
+
+ // We found what looks like a valid frame,
+ valid = true;
+ *out_pos = pos;
+ } while (!valid);
+
+ return valid;
+}
+
+
+// Construct the decoder around a compressed MP3 source; all state starts
+// empty and init() builds the output format metadata.
+MP3Decoder::MP3Decoder(const sp<MediaSource> &source)
+ : mSource(source),
+ mNumChannels(0),
+ mStarted(false),
+ mBufferGroup(NULL),
+ mConfig(new tPVMP3DecoderExternal),
+ mDecoderBuf(NULL),
+ mAnchorTimeUs(0),
+ mNumFramesOutput(0),
+ mInputBuffer(NULL),
+ mPartialBuffer(NULL),
+ mFixedHeader(0) {
+ init();
+}
+
+// Build the output MetaData: raw PCM with the source's channel count,
+// sample rate and (if present) duration.
+void MP3Decoder::init() {
+ sp<MetaData> srcFormat = mSource->getFormat();
+
+ int32_t sampleRate;
+ CHECK(srcFormat->findInt32(kKeyChannelCount, &mNumChannels));
+ CHECK(srcFormat->findInt32(kKeySampleRate, &sampleRate));
+
+ mMeta = new MetaData;
+ mMeta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW);
+ mMeta->setInt32(kKeyChannelCount, mNumChannels);
+ mMeta->setInt32(kKeySampleRate, sampleRate);
+
+ int64_t durationUs;
+ if (srcFormat->findInt64(kKeyDuration, &durationUs)) {
+ mMeta->setInt64(kKeyDuration, durationUs);
+ }
+
+ mMeta->setCString(kKeyDecoderComponent, "MP3Decoder");
+}
+
+MP3Decoder::~MP3Decoder() {
+ if (mStarted) {
+ stop();
+ }
+
+ delete mConfig;
+ mConfig = NULL;
+}
+
+// Allocates the output buffer pool and the PV MP3 decoder scratch
+// memory, initializes the decoder and starts the upstream source.
+// "params" is accepted for the MediaSource interface but unused.
+// Returns OK on success, NO_MEMORY if the decoder buffer cannot be
+// allocated, or the source's error if mSource->start() fails.
+status_t MP3Decoder::start(MetaData *params) {
+ CHECK(!mStarted);
+
+ mBufferGroup = new MediaBufferGroup;
+ // 4608 bytes is the largest PCM output of one MPEG-1 layer-3 frame
+ // (1152 samples * 2 channels * 2 bytes); doubled for headroom.
+ mBufferGroup->add_buffer(new MediaBuffer(4608 * 2));
+
+ mConfig->equalizerType = flat;
+ mConfig->crcEnabled = false;
+
+ uint32_t memRequirements = pvmp3_decoderMemRequirements();
+ mDecoderBuf = malloc(memRequirements);
+ if (mDecoderBuf == NULL) {
+ // Allocation failed: undo the buffer group so stop()/dtor
+ // does not operate on a half-started decoder.
+ ALOGE("MP3Decoder::start failed to allocate %u bytes", memRequirements);
+ delete mBufferGroup;
+ mBufferGroup = NULL;
+ return NO_MEMORY;
+ }
+
+ pvmp3_InitDecoder(mConfig, mDecoderBuf);
+
+ status_t err = mSource->start();
+ if (err != OK) {
+ // Propagate the source failure and release what we allocated.
+ ALOGE("MP3Decoder::start source start failed (%d)", err);
+ free(mDecoderBuf);
+ mDecoderBuf = NULL;
+ delete mBufferGroup;
+ mBufferGroup = NULL;
+ return err;
+ }
+
+ mAnchorTimeUs = 0;
+ mNumFramesOutput = 0;
+ mStarted = true;
+
+ return OK;
+}
+
+// Releases any in-flight input buffer, frees the decoder scratch
+// memory and the output buffer pool, and stops the upstream source.
+// Inverse of start(); safe to call exactly once per start().
+status_t MP3Decoder::stop() {
+ CHECK(mStarted);
+
+ if (mInputBuffer) {
+ mInputBuffer->release();
+ mInputBuffer = NULL;
+ }
+
+ // NOTE(review): mPartialBuffer is not released here — read() owns
+ // its lifecycle, but a stop() mid-stream could leak it; confirm.
+ free(mDecoderBuf);
+ mDecoderBuf = NULL;
+
+ delete mBufferGroup;
+ mBufferGroup = NULL;
+
+ mSource->stop();
+
+ mStarted = false;
+
+ return OK;
+}
+
+// Returns the PCM output format built in init().
+sp<MetaData> MP3Decoder::getFormat() {
+ return mMeta;
+}
+
+// Stitches together an MP3 frame that was split across two input
+// buffers: mPartialBuffer holds the head of the frame (saved by a
+// previous read() pass), mInputBuffer holds the tail. On success the
+// consumed tail bytes are removed from mInputBuffer's range and
+// mPartialBuffer contains one complete frame. Returns OK on success;
+// UNKNOWN_ERROR when the frame cannot be validated or completed, in
+// which case the caller discards mPartialBuffer and carries on.
+status_t MP3Decoder::updatePartialFrame() {
+ status_t err = OK;
+ if (mPartialBuffer == NULL) {
+ return err;
+ }
+
+ size_t frameSize = 0;
+ uint32_t partialBufLen = mPartialBuffer->range_length();
+ uint32_t inputBufLen = mInputBuffer->range_length();
+ uint8_t frameHeader[4];
+ uint8_t *frmHdr;
+ uint32_t header;
+
+
+ // Look at the frame size and complete the partial frame.
+ // Also check if a valid header is found after the partial frame.
+ if (partialBufLen < 4) { // check if partial frame has the 4 bytes header
+ if (inputBufLen < (4 - partialBufLen)) {
+ // input buffer does not have the frame header bytes, bail out;
+ // the caller releases the stale partial buffer.
+ ALOGE("MP3Decoder::updatePartialFrame buffer too small, header not found"
+ " partial buffer len %d, input buffer len %d",
+ partialBufLen, inputBufLen);
+ return UNKNOWN_ERROR;
+ }
+
+ // Reassemble the 4-byte header from the tail of the partial
+ // buffer plus the head of the new input buffer.
+ memcpy (frameHeader, mPartialBuffer->data(), partialBufLen);
+ memcpy (frameHeader + partialBufLen, mInputBuffer->data(), (4 - partialBufLen));
+ frmHdr = frameHeader;
+ } else {
+ frmHdr = (uint8_t *)mPartialBuffer->data();
+ }
+
+ // check if its a good frame, and compute the frame size
+ // from the first 4 header bytes
+ header = U32_AT(frmHdr);
+ bool curFrame = get_mp3_frame_size(header,&frameSize);
+ if (!curFrame) {
+ ALOGE("MP3Decoder::read - partial frame does not have a valid header 0x%x",
+ header);
+ return UNKNOWN_ERROR;
+ }
+
+ // check if the following frame is good
+ uint32_t nextFrameOffset = frameSize - partialBufLen;
+ if ((nextFrameOffset + 4) <= inputBufLen) {
+ header = U32_AT((uint8_t *)mInputBuffer->data() + nextFrameOffset);
+ // 0xffe00000 is the 11-bit MPEG frame-sync mask.
+ if ((header & 0xffe00000) != 0xffe00000) {
+ // next frame does not have a valid header,
+ // this may not be the next buffer, bail out.
+ ALOGE("MP3Decoder::read - next frame does not have a valid header 0x%x",
+ header);
+ return UNKNOWN_ERROR;
+ }
+ } else {
+ // next frame header is out of range
+ // assume good header for now
+ ALOGE("MP3Decoder::read - assuming next frame is good");
+ }
+
+ // check if the input buffer has the remaining partial frame
+ if (frameSize > (partialBufLen + inputBufLen)) {
+ // input buffer does not have the remaining partial frame,
+ // discard data here as frame split in 3 buffers not supported
+ ALOGE("MP3Decoder::updatePartialFrame - input buffer does not have the complete frame."
+ " frame size %d, saved partial buffer len %d,"
+ " input buffer len %d", frameSize, partialBufLen, inputBufLen);
+ return UNKNOWN_ERROR;
+ }
+
+ // check if the mPartialBuffer can fit the remaining frame
+ if ((mPartialBuffer->size() - partialBufLen) < (frameSize - partialBufLen)) {
+ // mPartialBuffer is too small to hold the remaining frame
+ ALOGE("MP3Decoder::updatePartialFrame - mPartialBuffer is small, size %d, required %d",
+ (mPartialBuffer->size() - partialBufLen), (frameSize - partialBufLen));
+ return UNKNOWN_ERROR;
+ }
+
+ // done with error checks;
+ // copy the remaining frame bytes from the input buffer to form
+ // a complete frame in mPartialBuffer
+ uint32_t bytesRemaining = frameSize - mPartialBuffer->range_length();
+ memcpy ((uint8_t *)mPartialBuffer->data() + mPartialBuffer->range_length(),
+ (uint8_t *)mInputBuffer->data() + mInputBuffer->range_offset(),
+ bytesRemaining);
+
+ // mark the bytes as consumed from input buffer
+ mInputBuffer->set_range(
+ mInputBuffer->range_offset() + bytesRemaining,
+ mInputBuffer->range_length() - bytesRemaining);
+
+ // set the range and length of mPartialBuffer
+ mPartialBuffer->set_range(0,
+ mPartialBuffer->range_length() + bytesRemaining);
+
+ ALOGE("MP3Decoder::updatePartialFrame - copied the partial frame %d, input buffer length %d",
+ bytesRemaining, mInputBuffer->range_length());
+
+ return err;
+}
+
+// Decodes one MP3 frame into a PCM MediaBuffer (*out). Handles seek
+// requests (decoder reset + resync), frames split across input
+// buffers (via mPartialBuffer / updatePartialFrame), and recoverable
+// decoder errors (outputs silence or skips junk to the next sync).
+// Returns OK with *out set, or propagates the source/decoder error.
+status_t MP3Decoder::read(
+ MediaBuffer **out, const ReadOptions *options) {
+ status_t err;
+
+ *out = NULL;
+ bool usedPartialFrame = false;
+ bool seekSource = false;
+
+ int64_t seekTimeUs;
+ ReadOptions::SeekMode mode;
+ if (options && options->getSeekTo(&seekTimeUs, &mode)) {
+ CHECK(seekTimeUs >= 0);
+
+ mNumFramesOutput = 0;
+ seekSource = true;
+
+ // Drop any buffered input: it belongs to the pre-seek position.
+ if (mInputBuffer) {
+ mInputBuffer->release();
+ mInputBuffer = NULL;
+ }
+
+ if (mPartialBuffer) {
+ mPartialBuffer->release();
+ mPartialBuffer = NULL;
+ }
+
+ // Make sure that the next buffer output does not still
+ // depend on fragments from the last one decoded.
+ pvmp3_InitDecoder(mConfig, mDecoderBuf);
+ } else {
+ seekTimeUs = -1;
+ }
+
+ if (mInputBuffer == NULL) {
+ err = mSource->read(&mInputBuffer, options);
+
+ if (err != OK) {
+ return err;
+ }
+
+ // NOTE(review): data() is read without range_offset() below —
+ // assumes a fresh source buffer starts at offset 0; confirm.
+ if ((mFixedHeader == 0) && (mInputBuffer->range_length() > 4)) {
+ //save the first 4 bytes as the fixed header for the rest of the file
+ mFixedHeader = U32_AT((uint8_t *)mInputBuffer->data());
+ }
+
+ if (seekSource == true) {
+ off_t syncOffset = 0;
+ bool valid = resync((uint8_t *)mInputBuffer->data() + mInputBuffer->range_offset()
+ ,mInputBuffer->range_length(), mFixedHeader, &syncOffset);
+ if (valid) {
+ // skip ahead to the detected sync point
+ mInputBuffer->set_range(
+ mInputBuffer->range_offset() + syncOffset,
+ mInputBuffer->range_length() - syncOffset);
+ ALOGV("mp3 decoder found a sync point after seek syncOffset %d", syncOffset);
+ } else {
+ ALOGV("NO SYNC POINT found, buffer length %d",mInputBuffer->range_length());
+ }
+ }
+
+ int64_t timeUs;
+ if (mInputBuffer->meta_data()->findInt64(kKeyTime, &timeUs)) {
+ mAnchorTimeUs = timeUs;
+ mNumFramesOutput = 0;
+ } else {
+ // We must have a new timestamp after seeking.
+ CHECK(seekTimeUs < 0);
+ }
+ // check for partial frame saved from the previous input buffer
+ if (mPartialBuffer != NULL) {
+ err = updatePartialFrame();
+ if (err != OK) {
+ // updating partial frame failed, discard the previously
+ // saved partial frame and continue
+ mPartialBuffer->release();
+ mPartialBuffer = NULL;
+ err = OK;
+ }
+ }
+ }
+
+ MediaBuffer *buffer;
+ CHECK_EQ(mBufferGroup->acquire_buffer(&buffer), OK);
+
+ // Feed the decoder either the reassembled partial frame or the
+ // current input buffer.
+ if (mPartialBuffer != NULL) {
+ mConfig->pInputBuffer =
+ (uint8_t *)mPartialBuffer->data() + mPartialBuffer->range_offset();
+ mConfig->inputBufferCurrentLength = mPartialBuffer->range_length();
+ usedPartialFrame = true;
+ } else {
+ mConfig->pInputBuffer =
+ (uint8_t *)mInputBuffer->data() + mInputBuffer->range_offset();
+ mConfig->inputBufferCurrentLength = mInputBuffer->range_length();
+ }
+
+ mConfig->inputBufferMaxLength = 0;
+ mConfig->inputBufferUsedLength = 0;
+
+ mConfig->outputFrameSize = buffer->size() / sizeof(int16_t);
+ mConfig->pOutputBuffer = static_cast<int16_t *>(buffer->data());
+
+ ERROR_CODE decoderErr;
+ if ((decoderErr = pvmp3_framedecoder(mConfig, mDecoderBuf))
+ != NO_DECODING_ERROR) {
+ ALOGV("mp3 decoder returned error %d", decoderErr);
+
+ // Only "not enough data" and "sync lost" are recoverable here;
+ // anything else aborts the read.
+ if ((decoderErr != NO_ENOUGH_MAIN_DATA_ERROR) &&
+ (decoderErr != SYNCH_LOST_ERROR)) {
+ buffer->release();
+ buffer = NULL;
+
+ mInputBuffer->release();
+ mInputBuffer = NULL;
+ if (mPartialBuffer) {
+ mPartialBuffer->release();
+ mPartialBuffer = NULL;
+ }
+ ALOGE("mp3 decoder returned UNKNOWN_ERROR");
+
+ return UNKNOWN_ERROR;
+ }
+
+ if ((mPartialBuffer == NULL) && (decoderErr == NO_ENOUGH_MAIN_DATA_ERROR)) {
+ // Might be a partial frame, save it for the next read() to
+ // complete with the following input buffer
+ mPartialBuffer = new MediaBuffer(mInputBuffer->size());
+ memcpy ((uint8_t *)mPartialBuffer->data(),
+ mConfig->pInputBuffer, mConfig->inputBufferCurrentLength);
+ mPartialBuffer->set_range(0, mConfig->inputBufferCurrentLength);
+ // set output buffer to 0
+ mConfig->outputFrameSize = 0;
+ // consume the copied bytes from input
+ mConfig->inputBufferUsedLength = mConfig->inputBufferCurrentLength;
+ } else if(decoderErr == SYNCH_LOST_ERROR) {
+ // Try to find the mp3 frame header in the current buffer
+ off_t syncOffset = 0;
+ bool valid = resync(mConfig->pInputBuffer, mConfig->inputBufferCurrentLength,
+ mFixedHeader, &syncOffset);
+ if (!valid || !syncOffset) {
+ // consume these bytes, we might find a frame header in next buffer
+ syncOffset = mConfig->inputBufferCurrentLength;
+ }
+ // set output buffer to 0
+ mConfig->outputFrameSize = 0;
+ // consume the junk bytes from input buffer
+ mConfig->inputBufferUsedLength = syncOffset;
+ } else {
+ // This is recoverable, just ignore the current frame and
+ // play silence instead.
+ memset(buffer->data(), 0, mConfig->outputFrameSize * sizeof(int16_t));
+ mConfig->inputBufferUsedLength = mInputBuffer->range_length();
+ }
+ }
+
+ buffer->set_range(
+ 0, mConfig->outputFrameSize * sizeof(int16_t));
+
+ // Account for consumed bytes in whichever buffer fed the decoder.
+ if ((mPartialBuffer != NULL) && usedPartialFrame) {
+ mPartialBuffer->set_range(
+ mPartialBuffer->range_offset() + mConfig->inputBufferUsedLength,
+ mPartialBuffer->range_length() - mConfig->inputBufferUsedLength);
+ mPartialBuffer->release();
+ mPartialBuffer = NULL;
+ } else {
+ mInputBuffer->set_range(
+ mInputBuffer->range_offset() + mConfig->inputBufferUsedLength,
+ mInputBuffer->range_length() - mConfig->inputBufferUsedLength);
+ }
+
+ if (mInputBuffer->range_length() == 0) {
+ mInputBuffer->release();
+ mInputBuffer = NULL;
+ }
+
+ // Timestamp = anchor + frames emitted since anchor / sample rate.
+ buffer->meta_data()->setInt64(
+ kKeyTime,
+ mAnchorTimeUs
+ + (mNumFramesOutput * 1000000) / mConfig->samplingRate);
+
+ mNumFramesOutput += mConfig->outputFrameSize / mNumChannels;
+
+ *out = buffer;
+
+ return OK;
+}
+
+} // namespace android
diff --git a/media/libstagefright/colorconversion/SoftwareRenderer.cpp b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
index 2704a37..834e6b3 100644
--- a/media/libstagefright/colorconversion/SoftwareRenderer.cpp
+++ b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
@@ -94,11 +94,20 @@ SoftwareRenderer::SoftwareRenderer(
CHECK(mCropHeight > 0);
CHECK(mConverter == NULL || mConverter->isValid());
+#ifdef EXYNOS4_ENHANCEMENTS
+ CHECK_EQ(0,
+ native_window_set_usage(
+ mNativeWindow.get(),
+ GRALLOC_USAGE_SW_READ_NEVER | GRALLOC_USAGE_SW_WRITE_OFTEN
+ | GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_EXTERNAL_DISP
+ | GRALLOC_USAGE_HW_FIMC1 | GRALLOC_USAGE_HWC_HWOVERLAY));
+#else
CHECK_EQ(0,
native_window_set_usage(
mNativeWindow.get(),
GRALLOC_USAGE_SW_READ_NEVER | GRALLOC_USAGE_SW_WRITE_OFTEN
| GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_EXTERNAL_DISP));
+#endif
CHECK_EQ(0,
native_window_set_scaling_mode(
@@ -200,13 +209,25 @@ void SoftwareRenderer::render(
const uint8_t *src_uv =
(const uint8_t *)data + mWidth * (mHeight - mCropTop / 2);
- uint8_t *dst_y = (uint8_t *)dst;
+#ifdef EXYNOS4_ENHANCEMENTS
+ void *pYUVBuf[3];
+
+ CHECK_EQ(0, mapper.unlock(buf->handle));
+ CHECK_EQ(0, mapper.lock(
+ buf->handle, GRALLOC_USAGE_SW_WRITE_OFTEN | GRALLOC_USAGE_YUV_ADDR, bounds, pYUVBuf));
+ size_t dst_c_stride = buf->stride / 2;
+ uint8_t *dst_y = (uint8_t *)pYUVBuf[0];
+ uint8_t *dst_v = (uint8_t *)pYUVBuf[1];
+ uint8_t *dst_u = (uint8_t *)pYUVBuf[2];
+#else
size_t dst_y_size = buf->stride * buf->height;
size_t dst_c_stride = ALIGN(buf->stride / 2, 16);
size_t dst_c_size = dst_c_stride * buf->height / 2;
+ uint8_t *dst_y = (uint8_t *)dst;
uint8_t *dst_v = dst_y + dst_y_size;
uint8_t *dst_u = dst_v + dst_c_size;
+#endif
for (int y = 0; y < mCropHeight; ++y) {
memcpy(dst_y, src_y, mCropWidth);
diff --git a/media/libstagefright/id3/Android.mk b/media/libstagefright/id3/Android.mk
index ff35d4a..d500aa6 100644
--- a/media/libstagefright/id3/Android.mk
+++ b/media/libstagefright/id3/Android.mk
@@ -4,6 +4,10 @@ include $(CLEAR_VARS)
LOCAL_SRC_FILES := \
ID3.cpp
+ifneq ($(TI_CUSTOM_DOMX_PATH),)
+LOCAL_C_INCLUDES += $(TI_CUSTOM_DOMX_PATH)/omx_core/inc
+endif
+
LOCAL_MODULE := libstagefright_id3
include $(BUILD_STATIC_LIBRARY)
diff --git a/media/libstagefright/include/AwesomePlayer.h b/media/libstagefright/include/AwesomePlayer.h
index 1422687..ee885a5 100644
--- a/media/libstagefright/include/AwesomePlayer.h
+++ b/media/libstagefright/include/AwesomePlayer.h
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2009 The Android Open Source Project
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -200,6 +201,9 @@ private:
bool mWatchForAudioSeekComplete;
bool mWatchForAudioEOS;
+#ifdef QCOM_ENHANCED_AUDIO
+ static int mTunnelAliveAP;
+#endif
sp<TimedEventQueue::Event> mVideoEvent;
bool mVideoEventPending;
@@ -300,6 +304,12 @@ private:
ASSIGN
};
void modifyFlags(unsigned value, FlagMode mode);
+ void logFirstFrame();
+ void logCatchUp(int64_t ts, int64_t clock, int64_t delta);
+ void logLate(int64_t ts, int64_t clock, int64_t delta);
+ void logOnTime(int64_t ts, int64_t clock, int64_t delta);
+ int64_t getTimeOfDayUs();
+ bool mStatistics;
struct TrackStat {
String8 mMIME;
@@ -325,6 +335,21 @@ private:
int32_t mVideoHeight;
uint32_t mFlags;
Vector<TrackStat> mTracks;
+
+ int64_t mConsecutiveFramesDropped;
+ uint32_t mCatchupTimeStart;
+ uint32_t mNumTimesSyncLoss;
+ uint32_t mMaxEarlyDelta;
+ uint32_t mMaxLateDelta;
+ uint32_t mMaxTimeSyncLoss;
+ uint64_t mTotalFrames;
+ int64_t mFirstFrameLatencyStartUs; //first frame latency start
+ int64_t mFirstFrameLatencyUs;
+ int64_t mLastFrameUs;
+ bool mVeryFirstFrame;
+ int64_t mTotalTimeUs;
+ int64_t mLastPausedTimeMs;
+ int64_t mLastSeekToTimeMs;
} mStats;
status_t setVideoScalingMode(int32_t mode);
@@ -339,6 +364,13 @@ private:
size_t countTracks() const;
+#ifdef USE_TUNNEL_MODE
+ bool inSupportedTunnelFormats(const char * mime);
+
+ //Flag to check if tunnel mode audio is enabled
+ bool mIsTunnelAudio;
+#endif
+
AwesomePlayer(const AwesomePlayer &);
AwesomePlayer &operator=(const AwesomePlayer &);
};
diff --git a/media/libstagefright/include/ExtendedExtractor.h b/media/libstagefright/include/ExtendedExtractor.h
new file mode 100644
index 0000000..e7d8704
--- /dev/null
+++ b/media/libstagefright/include/ExtendedExtractor.h
@@ -0,0 +1,58 @@
+/*Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef EXTENDED_EXTRACTOR_
+#define EXTENDED_EXTRACTOR_
+
+#include <media/stagefright/DataSource.h>
+
+namespace android {
+
+class MediaExtractor;
+
+typedef MediaExtractor* (*MediaExtractorFactory)(const sp<DataSource> &source, const char* mime);
+
+static const char* MEDIA_CREATE_EXTRACTOR = "CreateExtractor";
+
+typedef bool (*ExtendedExtractorSniffers)(const sp<DataSource> &source, String8 *mimeType,
+ float *confidence,sp<AMessage> *meta);
+
+static const char* EXTENDED_EXTRACTOR_SNIFFERS = "SniffExtendedExtractor";
+
+// Thin factory facade: resolves MEDIA_CREATE_EXTRACTOR from a vendor
+// extractor library and returns the resulting MediaExtractor (or NULL).
+class ExtendedExtractor
+{
+public:
+ static MediaExtractor* CreateExtractor(const sp<DataSource> &source, const char *mime);
+};
+
+bool SniffExtendedExtractor(const sp<DataSource> &source, String8 *mimeType,
+ float *confidence,sp<AMessage> *meta);
+
+} // namespace android
+
+#endif //EXTENDED_EXTRACTOR_
diff --git a/media/libstagefright/include/MP3Decoder.h b/media/libstagefright/include/MP3Decoder.h
new file mode 100644
index 0000000..8ff570a
--- /dev/null
+++ b/media/libstagefright/include/MP3Decoder.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MP3_DECODER_H_
+
+#define MP3_DECODER_H_
+
+#include <media/stagefright/MediaSource.h>
+
+struct tPVMP3DecoderExternal;
+
+namespace android {
+
+struct MediaBufferGroup;
+
+// Software MP3 decoder MediaSource: pulls compressed frames from
+// mSource and emits 16-bit PCM buffers via read().
+struct MP3Decoder : public MediaSource {
+ MP3Decoder(const sp<MediaSource> &source);
+
+ virtual status_t start(MetaData *params);
+ virtual status_t stop();
+
+ // PCM output format (channel count, sample rate, duration).
+ virtual sp<MetaData> getFormat();
+
+ virtual status_t read(
+ MediaBuffer **buffer, const ReadOptions *options);
+
+protected:
+ virtual ~MP3Decoder();
+
+private:
+ sp<MediaSource> mSource;
+ sp<MetaData> mMeta;
+ int32_t mNumChannels;
+
+ bool mStarted;
+
+ // Pool of PCM output buffers handed out by read().
+ MediaBufferGroup *mBufferGroup;
+
+ // PacketVideo decoder configuration and scratch memory.
+ tPVMP3DecoderExternal *mConfig;
+ void *mDecoderBuf;
+ int64_t mAnchorTimeUs;
+ int64_t mNumFramesOutput;
+ // First frame header of the stream, used as a resync reference.
+ uint32_t mFixedHeader;
+
+ // Current compressed input buffer from mSource.
+ MediaBuffer *mInputBuffer;
+ // Head of a frame that straddled two input buffers.
+ MediaBuffer *mPartialBuffer;
+
+ void init();
+
+ MP3Decoder(const MP3Decoder &);
+ MP3Decoder &operator=(const MP3Decoder &);
+ status_t updatePartialFrame();
+};
+
+} // namespace android
+
+#endif // MP3_DECODER_H_
diff --git a/media/libstagefright/include/PCMExtractor.h b/media/libstagefright/include/PCMExtractor.h
new file mode 100644
index 0000000..4717d10
--- /dev/null
+++ b/media/libstagefright/include/PCMExtractor.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ * Copyright (C) 2010 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Author: Andreas Gustafsson (andreas.a.gustafsson@stericsson.com)
+ * for ST-Ericsson
+ */
+
+#ifndef PCM_EXTRACTOR_H_
+
+#define PCM_EXTRACTOR_H_
+
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/MediaSource.h>
+#include <media/stagefright/MediaExtractor.h>
+#include <media/stagefright/foundation/ABase.h>
+
+namespace android {
+
+// Extractor exposing a raw PCM data source as a single audio track.
+class PCMExtractor : public MediaExtractor {
+public:
+ // Extractor assumes ownership of "source".
+ PCMExtractor(const sp<DataSource> &source);
+
+ virtual size_t countTracks();
+ virtual sp<MediaSource> getTrack(size_t index);
+ virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
+
+ virtual sp<MetaData> getMetaData();
+
+protected:
+ virtual ~PCMExtractor();
+
+private:
+ sp<DataSource> mDataSource;
+ status_t mInitCheck;  // result of init(); gates all track queries
+ bool mValidFormat;
+ off_t mDataOffset;    // byte offset of the PCM payload in the source
+ size_t mDataSize;
+ sp<MetaData> mTrackMeta;
+
+ status_t init();
+
+ DISALLOW_EVIL_CONSTRUCTORS(PCMExtractor);
+};
+
+} // namespace android
+
+#endif // PCM_EXTRACTOR_H_
diff --git a/media/libstagefright/omx/Android.mk b/media/libstagefright/omx/Android.mk
index d7fbbbe..5d6467d 100644
--- a/media/libstagefright/omx/Android.mk
+++ b/media/libstagefright/omx/Android.mk
@@ -23,6 +23,10 @@ LOCAL_SHARED_LIBRARIES := \
libstagefright_foundation \
libdl
+ifeq ($(BOARD_USES_PROPRIETARY_OMX),SAMSUNG)
+LOCAL_CFLAGS += -DSAMSUNG_OMX
+endif
+
LOCAL_MODULE:= libstagefright_omx
include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/omx/OMXMaster.cpp b/media/libstagefright/omx/OMXMaster.cpp
index 6b6d0ab..c544770 100644
--- a/media/libstagefright/omx/OMXMaster.cpp
+++ b/media/libstagefright/omx/OMXMaster.cpp
@@ -45,6 +45,9 @@ OMXMaster::~OMXMaster() {
void OMXMaster::addVendorPlugin() {
addPlugin("libstagefrighthw.so");
+#ifdef SAMSUNG_OMX
+ addPlugin("libsomxcore.so");
+#endif
}
void OMXMaster::addPlugin(const char *libname) {
diff --git a/media/libstagefright/timedtext/Android.mk b/media/libstagefright/timedtext/Android.mk
index f099bbd..4c6172a 100644
--- a/media/libstagefright/timedtext/Android.mk
+++ b/media/libstagefright/timedtext/Android.mk
@@ -14,6 +14,10 @@ LOCAL_C_INCLUDES:= \
$(TOP)/frameworks/av/include/media/stagefright/timedtext \
$(TOP)/frameworks/av/media/libstagefright
+ifneq ($(TI_CUSTOM_DOMX_PATH),)
+LOCAL_C_INCLUDES += $(TI_CUSTOM_DOMX_PATH)/omx_core/inc
+endif
+
LOCAL_MODULE:= libstagefright_timedtext
include $(BUILD_STATIC_LIBRARY)
diff --git a/media/libstagefright/wifi-display/Android.mk b/media/libstagefright/wifi-display/Android.mk
index 75098f1..b3ec4d6 100644
--- a/media/libstagefright/wifi-display/Android.mk
+++ b/media/libstagefright/wifi-display/Android.mk
@@ -66,6 +66,10 @@ include $(BUILD_EXECUTABLE)
include $(CLEAR_VARS)
+ifneq ($(TI_CUSTOM_DOMX_PATH),)
+LOCAL_C_INCLUDES:= $(TI_CUSTOM_DOMX_PATH)/omx_core/inc
+endif
+
LOCAL_SRC_FILES:= \
udptest.cpp \
diff --git a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp b/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
index 08f67f9..eecc59b 100644
--- a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
+++ b/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
@@ -826,7 +826,10 @@ status_t WifiDisplaySource::onReceiveM3Response(
}
mUsingHDCP = false;
- if (!params->findParameter("wfd_content_protection", &value)) {
+ if (property_get("persist.sys.wfd.nohdcp", val, NULL)
+ && !strcmp("1", val)) {
+ ALOGI("Content protection has been disabled for WFD sinks");
+ } else if (!params->findParameter("wfd_content_protection", &value)) {
ALOGI("Sink doesn't appear to support content protection.");
} else if (value == "none") {
ALOGI("Sink does not support content protection.");
diff --git a/media/mediaserver/Android.mk b/media/mediaserver/Android.mk
index 5a73cdd..1ff87c8 100644
--- a/media/mediaserver/Android.mk
+++ b/media/mediaserver/Android.mk
@@ -11,6 +11,11 @@ LOCAL_SHARED_LIBRARIES := \
libutils \
libbinder
+ifeq ($(BOARD_USE_SECTVOUT),true)
+ LOCAL_CFLAGS += -DSECTVOUT
+ LOCAL_SHARED_LIBRARIES += libTVOut
+endif
+
# FIXME The duplicate audioflinger is temporary
LOCAL_C_INCLUDES := \
frameworks/av/media/libmediaplayerservice \
diff --git a/media/mediaserver/main_mediaserver.cpp b/media/mediaserver/main_mediaserver.cpp
index ddd5b84..993715e 100644
--- a/media/mediaserver/main_mediaserver.cpp
+++ b/media/mediaserver/main_mediaserver.cpp
@@ -31,12 +31,21 @@
using namespace android;
+#ifdef SECTVOUT
+namespace android { namespace SecTVOutService {
+void instantiate(void);
+} }
+#endif
+
int main(int argc, char** argv)
{
signal(SIGPIPE, SIG_IGN);
sp<ProcessState> proc(ProcessState::self());
sp<IServiceManager> sm = defaultServiceManager();
ALOGI("ServiceManager: %p", sm.get());
+#ifdef SECTVOUT
+ SecTVOutService::instantiate();
+#endif
AudioFlinger::instantiate();
MediaPlayerService::instantiate();
CameraService::instantiate();
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 2899953..a14c205 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -13,6 +13,10 @@ include $(BUILD_STATIC_LIBRARY)
include $(CLEAR_VARS)
+ifeq ($(TARGET_QCOM_AUDIO_VARIANT),caf)
+LOCAL_CFLAGS += -DQCOM_ENHANCED_AUDIO
+endif
+
LOCAL_SRC_FILES:= \
AudioFlinger.cpp \
AudioMixer.cpp.arm \
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 1913b6f..bcb9756 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -1,6 +1,11 @@
/*
**
** Copyright 2007, The Android Open Source Project
+** Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+**
+** Copyright (c) 2012, The Linux Foundation. All rights reserved.
+** Not a Contribution, Apache license notifications and license are retained
+** for attribution purposes only.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
@@ -100,6 +105,11 @@
#define ALOGVV(a...) do { } while(0)
#endif
+#ifdef QCOM_HARDWARE
+#define DIRECT_TRACK_EOS 1
+static const char lockName[] = "DirectTrack";
+#endif
+
namespace android {
static const char kDeadlockedString[] = "AudioFlinger may be deadlocked\n";
@@ -206,11 +216,13 @@ static int load_audio_interface(const char *if_name, audio_hw_device_t **dev)
if (rc) {
goto out;
}
+#if !defined(ICS_AUDIO_BLOB) && !defined(MR0_AUDIO_BLOB)
if ((*dev)->common.version != AUDIO_DEVICE_API_VERSION_CURRENT) {
ALOGE("%s wrong audio hw device version %04x", __func__, (*dev)->common.version);
rc = BAD_VALUE;
goto out;
}
+#endif
return 0;
out:
@@ -218,6 +230,14 @@ out:
return rc;
}
+// Returns the number of input channels represented by "channels".
+// On QCOM hardware the mask is first restricted to the supported
+// capture configurations (mono, stereo, 5.1) before counting bits.
+static uint32_t getInputChannelCount(uint32_t channels) {
+#ifdef QCOM_HARDWARE
+ // only mono or stereo and 5.1 are supported for input sources
+ return popcount((channels) & (AUDIO_CHANNEL_IN_STEREO | AUDIO_CHANNEL_IN_MONO | AUDIO_CHANNEL_IN_5POINT1));
+#else
+ return popcount(channels);
+#endif
+}
// ----------------------------------------------------------------------------
AudioFlinger::AudioFlinger()
@@ -235,10 +255,18 @@ AudioFlinger::AudioFlinger()
void AudioFlinger::onFirstRef()
{
int rc = 0;
+#ifdef QCOM_HARDWARE
+ mA2DPHandle = -1;
+#endif
Mutex::Autolock _l(mLock);
/* TODO: move all this work into an Init() function */
+#ifdef QCOM_HARDWARE
+ mLPASessionId = -2; // -2 is invalid session ID
+ mIsEffectConfigChanged = false;
+ mLPAEffectChain = NULL;
+#endif
char val_str[PROPERTY_VALUE_MAX] = { 0 };
if (property_get("ro.audio.flinger_standbytime_ms", val_str, NULL) >= 0) {
uint32_t int_val;
@@ -298,6 +326,13 @@ AudioFlinger::AudioHwDevice* AudioFlinger::findSuitableHwDev_l(
if ((dev->get_supported_devices != NULL) &&
(dev->get_supported_devices(dev) & devices) == devices)
return audioHwDevice;
+#ifdef ICS_AUDIO_BLOB
+ else if (dev->get_supported_devices == NULL && i != 0 &&
+ devices == 0x80)
+ // Reasonably safe assumption: A non-primary HAL without
+ // get_supported_devices is a locally-built A2DP binary
+ return audioHwDevice;
+#endif
}
} else {
// check a match for the requested module handle
@@ -544,9 +579,218 @@ Exit:
return trackHandle;
}
+#ifdef QCOM_HARDWARE
+// Creates a DirectAudioTrack bound to an existing direct output
+// (LPA/tunnel). For LPA outputs with a real session id, adopts any
+// effect chain already waiting on that session and neutralizes its
+// volume (DSP applies volume instead). On success registers the track
+// as the stream's observer. Returns the track, or NULL with *status
+// set to BAD_VALUE on an invalid output.
+sp<IDirectTrack> AudioFlinger::createDirectTrack(
+ pid_t pid,
+ uint32_t sampleRate,
+ audio_channel_mask_t channelMask,
+ audio_io_handle_t output,
+ int *sessionId,
+ IDirectTrackClient *client,
+ audio_stream_type_t streamType,
+ status_t *status)
+{
+ *status = NO_ERROR;
+ status_t lStatus = NO_ERROR;
+ sp<IDirectTrack> track = NULL;
+ DirectAudioTrack* directTrack = NULL;
+ Mutex::Autolock _l(mLock);
+
+ ALOGV("createDirectTrack() sessionId: %d sampleRate %d channelMask %d",
+ *sessionId, sampleRate, channelMask);
+ AudioSessionDescriptor *desc = mDirectAudioTracks.valueFor(output);
+ if(desc == NULL) {
+ ALOGE("Error: Invalid output (%d) to create direct audio track", output);
+ lStatus = BAD_VALUE;
+ goto Exit;
+ }
+ desc->mStreamType = streamType;
+ if (desc->flag & AUDIO_OUTPUT_FLAG_LPA) {
+ if (sessionId != NULL && *sessionId != AUDIO_SESSION_OUTPUT_MIX) {
+ for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+ sp<PlaybackThread> t = mPlaybackThreads.valueAt(i);
+ // Check if the session ID is already associated with a track
+ uint32_t sessions = t->hasAudioSession(*sessionId);
+
+ // check if an effect with same session ID is waiting for a session to be created
+ ALOGV("check if an effect with same session ID is waiting for a ssession to be created");
+ if ((mLPAEffectChain == NULL) && (sessions & PlaybackThread::EFFECT_SESSION)) {
+ // Clear reference to previous effect chain if any
+ t->mLock.lock();
+ ALOGV("getting the LPA effect chain and setting LPA flag to true.");
+ mLPAEffectChain = t->getEffectChain_l(*sessionId);
+ t->mLock.unlock();
+ }
+ }
+ mLPASessionId = *sessionId;
+ if (mLPAEffectChain != NULL) {
+ mLPAEffectChain->setLPAFlag(true);
+ // For LPA, the volume will be applied in DSP. No need for volume
+ // control in the Effect chain, so setting it to unity.
+ uint32_t volume = 0x1000000; // Equals to 1.0 in 8.24 format
+ mLPAEffectChain->setVolume_l(&volume,&volume);
+ } else {
+ ALOGW("There was no effectChain created for the sessionId(%d)", mLPASessionId);
+ }
+ mLPASampleRate = sampleRate;
+ mLPANumChannels = popcount(channelMask);
+ } else {
+ if(sessionId != NULL) {
+ ALOGE("Error: Invalid sessionID (%d) for direct audio track", *sessionId);
+ }
+ }
+ }
+ // NOTE(review): mLock is dropped around the DirectAudioTrack
+ // construction, presumably to avoid lock-order issues with the
+ // track's own init — state read above may change in the gap; confirm.
+ mLock.unlock();
+ directTrack = new DirectAudioTrack(this, output, desc, client, desc->flag);
+ desc->trackRefPtr = dynamic_cast<void *>(directTrack);
+ mLock.lock();
+ if (directTrack != 0) {
+ track = dynamic_cast<IDirectTrack *>(directTrack);
+ AudioEventObserver* obv = dynamic_cast<AudioEventObserver *>(directTrack);
+ ALOGE("setting observer mOutputDesc track %p, obv %p", track.get(), obv);
+ desc->stream->set_observer(desc->stream, reinterpret_cast<void *>(obv));
+ } else {
+ lStatus = BAD_VALUE;
+ }
+Exit:
+ if(lStatus) {
+ if (track != NULL) {
+ track.clear();
+ }
+ *status = lStatus;
+ }
+ return track;
+}
+
+// Tears down the LPA effect session: invalidates the session id,
+// clears the LPA flag on the chain, and rewires each effect's in/out
+// buffers back to the chain's own buffers (undoing the direct-track
+// buffer routing) before dropping the chain reference.
+void AudioFlinger::deleteEffectSession()
+{
+ Mutex::Autolock _l(mLock);
+ ALOGV("deleteSession");
+ // -2 is invalid session ID
+ mLPASessionId = -2;
+ if (mLPAEffectChain != NULL) {
+ mLPAEffectChain->setLPAFlag(false);
+ size_t i, numEffects = mLPAEffectChain->getNumEffects();
+ for(i = 0; i < numEffects; i++) {
+ sp<EffectModule> effect = mLPAEffectChain->getEffectFromIndex_l(i);
+ effect->setInBuffer(mLPAEffectChain->inBuffer());
+ // Last effect writes to the chain output; intermediates chain
+ // through the input buffer.
+ if (i == numEffects-1) {
+ effect->setOutBuffer(mLPAEffectChain->outBuffer());
+ } else {
+ effect->setOutBuffer(mLPAEffectChain->inBuffer());
+ }
+ effect->configure();
+ }
+ mLPAEffectChain.clear();
+ mLPAEffectChain = NULL;
+ }
+}
+
+// ToDo: Should we go ahead with this frameCount?
+#define DEAFULT_FRAME_COUNT 1200
+// Runs the LPA effect chain over "size" bytes of 16-bit interleaved
+// PCM from inBuffer into outBuffer, in chunks of at most
+// DEAFULT_FRAME_COUNT frames (note: macro name typo is in the
+// original patch). Aborts mid-stream if the effect configuration
+// changes (mIsEffectConfigChanged) or the chain disappears; if no
+// effects end up applied, input is copied through unchanged.
+void AudioFlinger::applyEffectsOn(void *token, int16_t *inBuffer, int16_t *outBuffer, int size)
+{
+ ALOGV("applyEffectsOn: inBuf %p outBuf %p size %d token %p", inBuffer, outBuffer, size, token);
+ // This might be the first buffer to apply effects after effect config change
+ // should not skip effects processing
+ mIsEffectConfigChanged = false;
+
+ volatile size_t numEffects = 0;
+ if(mLPAEffectChain != NULL) {
+ numEffects = mLPAEffectChain->getNumEffects();
+ }
+
+ if( numEffects > 0) {
+ size_t i = 0;
+ int16_t *pIn = inBuffer;
+ int16_t *pOut = outBuffer;
+
+ int frameCount = size / (sizeof(int16_t) * mLPANumChannels);
+
+ while(frameCount > 0) {
+ if(mLPAEffectChain == NULL) {
+ ALOGV("LPA Effect Chain is removed - No effects processing !!");
+ numEffects = 0;
+ break;
+ }
+ // Chain is locked per chunk, not across the whole buffer, so
+ // effect add/remove can interleave between chunks.
+ mLPAEffectChain->lock();
+
+ numEffects = mLPAEffectChain->getNumEffects();
+ if(!numEffects) {
+ ALOGV("applyEffectsOn: All the effects are removed - nothing to process");
+ mLPAEffectChain->unlock();
+ break;
+ }
+
+ int outFrameCount = (frameCount > DEAFULT_FRAME_COUNT ? DEAFULT_FRAME_COUNT: frameCount);
+ bool isEffectEnabled = false;
+ for(i = 0; i < numEffects; i++) {
+ // If effect configuration is changed while applying effects do not process further
+ if(mIsEffectConfigChanged) {
+ mLPAEffectChain->unlock();
+ ALOGV("applyEffectsOn: mIsEffectConfigChanged is set - no further processing");
+ return;
+ }
+ sp<EffectModule> effect = mLPAEffectChain->getEffectFromIndex_l(i);
+ if(effect == NULL) {
+ ALOGE("getEffectFromIndex_l(%d) returned NULL ptr", i);
+ mLPAEffectChain->unlock();
+ return;
+ }
+ if(i == 0) {
+ // For the first set input and output buffers different
+ isEffectEnabled = effect->isProcessEnabled();
+ effect->setInBuffer(pIn);
+ effect->setOutBuffer(pOut);
+ } else {
+ // For the remaining use previous effect's output buffer as input buffer
+ effect->setInBuffer(pOut);
+ effect->setOutBuffer(pOut);
+ }
+ // true indicates that it is being applied on LPA output
+ effect->configure(true, mLPASampleRate, mLPANumChannels, outFrameCount);
+ }
+
+ if(isEffectEnabled) {
+ // Clear the output buffer
+ memset(pOut, 0, (outFrameCount * mLPANumChannels * sizeof(int16_t)));
+ } else {
+ // Copy input buffer content to the output buffer
+ memcpy(pOut, pIn, (outFrameCount * mLPANumChannels * sizeof(int16_t)));
+ }
+
+ mLPAEffectChain->process_l();
+
+ mLPAEffectChain->unlock();
+
+ // Update input and output buffer pointers
+ pIn += (outFrameCount * mLPANumChannels);
+ pOut += (outFrameCount * mLPANumChannels);
+ frameCount -= outFrameCount;
+ }
+ }
+
+ if (!numEffects) {
+ ALOGV("applyEffectsOn: There are no effects to be applied");
+ if(inBuffer != outBuffer) {
+ // No effect applied so just copy input buffer to output buffer
+ memcpy(outBuffer, inBuffer, size);
+ }
+ }
+}
+#endif
+
uint32_t AudioFlinger::sampleRate(audio_io_handle_t output) const
{
Mutex::Autolock _l(mLock);
+#ifdef QCOM_HARDWARE
+ if (!mDirectAudioTracks.isEmpty()) {
+ AudioSessionDescriptor *desc = mDirectAudioTracks.valueFor(output);
+ if(desc != NULL) {
+ return desc->stream->common.get_sample_rate(&desc->stream->common);
+ }
+ }
+#endif
PlaybackThread *thread = checkPlaybackThread_l(output);
if (thread == NULL) {
ALOGW("sampleRate() unknown thread %d", output);
@@ -558,6 +802,12 @@ uint32_t AudioFlinger::sampleRate(audio_io_handle_t output) const
int AudioFlinger::channelCount(audio_io_handle_t output) const
{
Mutex::Autolock _l(mLock);
+#ifdef QCOM_HARDWARE
+ AudioSessionDescriptor *desc = mDirectAudioTracks.valueFor(output);
+ if(desc != NULL) {
+ return desc->stream->common.get_channels(&desc->stream->common);
+ }
+#endif
PlaybackThread *thread = checkPlaybackThread_l(output);
if (thread == NULL) {
ALOGW("channelCount() unknown thread %d", output);
@@ -580,6 +830,12 @@ audio_format_t AudioFlinger::format(audio_io_handle_t output) const
size_t AudioFlinger::frameCount(audio_io_handle_t output) const
{
Mutex::Autolock _l(mLock);
+#ifdef QCOM_HARDWARE
+ AudioSessionDescriptor *desc = mDirectAudioTracks.valueFor(output);
+ if(desc != NULL) {
+ return desc->stream->common.get_buffer_size(&desc->stream->common);
+ }
+#endif
PlaybackThread *thread = checkPlaybackThread_l(output);
if (thread == NULL) {
ALOGW("frameCount() unknown thread %d", output);
@@ -613,6 +869,9 @@ status_t AudioFlinger::setMasterVolume(float value)
return PERMISSION_DENIED;
}
+#ifdef QCOM_HARDWARE
+ mA2DPHandle = -1;
+#endif
Mutex::Autolock _l(mLock);
mMasterVolume = value;
@@ -724,6 +983,7 @@ status_t AudioFlinger::setMasterMute(bool muted)
mMasterMute = muted;
// Set master mute in the HALs which support it.
+#ifndef ICS_AUDIO_BLOB
for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
AutoMutex lock(mHardwareLock);
AudioHwDevice *dev = mAudioHwDevs.valueAt(i);
@@ -734,6 +994,7 @@ status_t AudioFlinger::setMasterMute(bool muted)
}
mHardwareStatus = AUDIO_HW_IDLE;
}
+#endif
// Now set the master mute in each playback thread. Playback threads
// assigned to HALs which do not have master mute support will apply master
@@ -781,12 +1042,35 @@ status_t AudioFlinger::setStreamVolume(audio_stream_type_t stream, float value,
}
AutoMutex lock(mLock);
+#ifdef QCOM_HARDWARE
+ ALOGV("setStreamVolume stream %d, output %d, value %f",stream, output, value);
+ AudioSessionDescriptor *desc = NULL;
+ if (!mDirectAudioTracks.isEmpty()) {
+ desc = mDirectAudioTracks.valueFor(output);
+ if (desc != NULL) {
+ ALOGV("setStreamVolume for mAudioTracks size %d desc %p",mDirectAudioTracks.size(),desc);
+ if (desc->mStreamType == stream) {
+ mStreamTypes[stream].volume = value;
+ desc->stream->set_volume(desc->stream,
+ desc->mVolumeLeft * mStreamTypes[stream].volume,
+ desc->mVolumeRight* mStreamTypes[stream].volume);
+ return NO_ERROR;
+ }
+ }
+ }
+#endif
PlaybackThread *thread = NULL;
if (output) {
thread = checkPlaybackThread_l(output);
if (thread == NULL) {
+#ifdef QCOM_HARDWARE
+ if (desc != NULL) {
+ return NO_ERROR;
+ }
+#endif
return BAD_VALUE;
}
+
}
mStreamTypes[stream].volume = value;
@@ -913,6 +1197,26 @@ status_t AudioFlinger::setParameters(audio_io_handle_t ioHandle, const String8&
return final_result;
}
+#ifdef QCOM_HARDWARE
+ AudioSessionDescriptor *desc = NULL;
+ if (!mDirectAudioTracks.isEmpty()) {
+ desc = mDirectAudioTracks.valueFor(ioHandle);
+ if (desc != NULL) {
+ ALOGV("setParameters for mAudioTracks size %d desc %p",mDirectAudioTracks.size(),desc);
+ desc->stream->common.set_parameters(&desc->stream->common, keyValuePairs.string());
+ AudioParameter param = AudioParameter(keyValuePairs);
+ String8 key = String8(AudioParameter::keyRouting);
+ int device;
+ if (param.getInt(key, device) == NO_ERROR) {
+ if(mLPAEffectChain != NULL){
+ mLPAEffectChain->setDevice_l(device);
+ audioConfigChanged_l(AudioSystem::EFFECT_CONFIG_CHANGED, 0, NULL);
+ }
+ }
+ }
+ }
+#endif
+
// hold a strong ref on thread in case closeOutput() or closeInput() is called
// and the thread is exited once the lock is released
sp<ThreadBase> thread;
@@ -925,13 +1229,17 @@ status_t AudioFlinger::setParameters(audio_io_handle_t ioHandle, const String8&
// indicate output device change to all input threads for pre processing
AudioParameter param = AudioParameter(keyValuePairs);
int value;
+ DefaultKeyedVector< int, sp<RecordThread> > recordThreads = mRecordThreads;
+ mLock.unlock();
if ((param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) &&
(value != 0)) {
- for (size_t i = 0; i < mRecordThreads.size(); i++) {
- mRecordThreads.valueAt(i)->setParameters(keyValuePairs);
+ for (size_t i = 0; i < recordThreads.size(); i++) {
+ recordThreads.valueAt(i)->setParameters(keyValuePairs);
}
}
+ mLock.lock();
}
+ mLock.unlock();
}
if (thread != 0) {
return thread->setParameters(keyValuePairs);
@@ -991,7 +1299,11 @@ size_t AudioFlinger::getInputBufferSize(uint32_t sampleRate, audio_format_t form
format: format,
};
audio_hw_device_t *dev = mPrimaryHardwareDev->hwDevice();
+#ifndef ICS_AUDIO_BLOB
size_t size = dev->get_input_buffer_size(dev, &config);
+#else
+ size_t size = dev->get_input_buffer_size(dev, sampleRate, format, popcount(channelMask));
+#endif
mHardwareStatus = AUDIO_HW_IDLE;
return size;
}
@@ -1048,14 +1360,14 @@ void AudioFlinger::registerClient(const sp<IAudioFlingerClient>& client)
Mutex::Autolock _l(mLock);
- pid_t pid = IPCThreadState::self()->getCallingPid();
- if (mNotificationClients.indexOfKey(pid) < 0) {
+ sp<IBinder> binder = client->asBinder();
+ if (mNotificationClients.indexOfKey(binder) < 0) {
sp<NotificationClient> notificationClient = new NotificationClient(this,
client,
- pid);
- ALOGV("registerClient() client %p, pid %d", notificationClient.get(), pid);
+ binder);
+ ALOGV("registerClient() client %p, binder %d", notificationClient.get(), binder.get());
- mNotificationClients.add(pid, notificationClient);
+ mNotificationClients.add(binder, notificationClient);
sp<IBinder> binder = client->asBinder();
binder->linkToDeath(notificationClient);
@@ -1070,14 +1382,39 @@ void AudioFlinger::registerClient(const sp<IAudioFlingerClient>& client)
mRecordThreads.valueAt(i)->sendIoConfigEvent(AudioSystem::INPUT_OPENED);
}
}
+#ifdef QCOM_HARDWARE
+ // Send the notification to the client only once.
+ if (mA2DPHandle != -1) {
+ ALOGV("A2DP active. Notifying the registered client");
+ client->ioConfigChanged(AudioSystem::A2DP_OUTPUT_STATE, mA2DPHandle, &mA2DPHandle);
+ }
+#endif
}
-void AudioFlinger::removeNotificationClient(pid_t pid)
+#ifdef QCOM_HARDWARE
+status_t AudioFlinger::deregisterClient(const sp<IAudioFlingerClient>& client)
{
+ ALOGV("deregisterClient() %p, tid %d, calling tid %d", client.get(), gettid(), IPCThreadState::self()->getCallingPid());
Mutex::Autolock _l(mLock);
- mNotificationClients.removeItem(pid);
+ sp<IBinder> binder = client->asBinder();
+ int index = mNotificationClients.indexOfKey(binder);
+ if (index >= 0) {
+ mNotificationClients.removeItemsAt(index);
+ return true;
+ }
+ return false;
+}
+#endif
+
+void AudioFlinger::removeNotificationClient(sp<IBinder> binder)
+{
+ Mutex::Autolock _l(mLock);
+
+ mNotificationClients.removeItem(binder);
+
+ int pid = IPCThreadState::self()->getCallingPid();
ALOGV("%d died, releasing its sessions", pid);
size_t num = mAudioSessionRefs.size();
bool removed = false;
@@ -1102,6 +1439,12 @@ void AudioFlinger::removeNotificationClient(pid_t pid)
// audioConfigChanged_l() must be called with AudioFlinger::mLock held
void AudioFlinger::audioConfigChanged_l(int event, audio_io_handle_t ioHandle, const void *param2)
{
+#ifdef QCOM_HARDWARE
+ ALOGV("AudioFlinger::audioConfigChanged_l: event %d", event);
+ if (event == AudioSystem::EFFECT_CONFIG_CHANGED) {
+ mIsEffectConfigChanged = true;
+ }
+#endif
size_t size = mNotificationClients.size();
for (size_t i = 0; i < size; i++) {
mNotificationClients.valueAt(i)->audioFlingerClient()->ioConfigChanged(event, ioHandle,
@@ -1204,6 +1547,15 @@ status_t AudioFlinger::ThreadBase::setParameters(const String8& keyValuePairs)
return status;
}
+#ifdef QCOM_HARDWARE
+void AudioFlinger::ThreadBase::effectConfigChanged() {
+ mAudioFlinger->mLock.lock();
+ ALOGV("New effect is being added to LPA chain, Notifying LPA Direct Track");
+ mAudioFlinger->audioConfigChanged_l(AudioSystem::EFFECT_CONFIG_CHANGED, 0, NULL);
+ mAudioFlinger->mLock.unlock();
+}
+#endif
+
void AudioFlinger::ThreadBase::sendIoConfigEvent(int event, int param)
{
Mutex::Autolock _l(mLock);
@@ -2652,7 +3004,10 @@ bool AudioFlinger::PlaybackThread::threadLoop()
// only process effects if we're going to write
if (sleepTime == 0) {
for (size_t i = 0; i < effectChains.size(); i ++) {
- effectChains[i]->process_l();
+#ifdef QCOM_HARDWARE
+ if (effectChains[i] != mAudioFlinger->mLPAEffectChain)
+#endif
+ effectChains[i]->process_l();
}
}
@@ -2844,11 +3199,13 @@ void AudioFlinger::MixerThread::threadLoop_mix()
int64_t pts;
status_t status = INVALID_OPERATION;
+#ifndef ICS_AUDIO_BLOB
if (mNormalSink != 0) {
status = mNormalSink->getNextWriteTimestamp(&pts);
} else {
status = mOutputSink->getNextWriteTimestamp(&pts);
}
+#endif
if (status != NO_ERROR) {
pts = AudioBufferProvider::kInvalidPTS;
@@ -5775,8 +6132,8 @@ void AudioFlinger::Client::releaseTimedTrack()
AudioFlinger::NotificationClient::NotificationClient(const sp<AudioFlinger>& audioFlinger,
const sp<IAudioFlingerClient>& client,
- pid_t pid)
- : mAudioFlinger(audioFlinger), mPid(pid), mAudioFlingerClient(client)
+ sp<IBinder> binder)
+ : mAudioFlinger(audioFlinger), mBinder(binder), mAudioFlingerClient(client)
{
}
@@ -5787,9 +6144,313 @@ AudioFlinger::NotificationClient::~NotificationClient()
void AudioFlinger::NotificationClient::binderDied(const wp<IBinder>& who)
{
sp<NotificationClient> keep(this);
- mAudioFlinger->removeNotificationClient(mPid);
+ mAudioFlinger->removeNotificationClient(mBinder);
+}
+
+// ----------------------------------------------------------------------------
+#ifdef QCOM_HARDWARE
+AudioFlinger::DirectAudioTrack::DirectAudioTrack(const sp<AudioFlinger>& audioFlinger,
+ int output, AudioSessionDescriptor *outputDesc,
+ IDirectTrackClient* client, audio_output_flags_t outflag)
+ : BnDirectTrack(), mIsPaused(false), mAudioFlinger(audioFlinger), mOutput(output), mOutputDesc(outputDesc),
+ mClient(client), mEffectConfigChanged(false), mKillEffectsThread(false), mFlag(outflag)
+{
+ if (mFlag & AUDIO_OUTPUT_FLAG_LPA) {
+ createEffectThread();
+
+ mAudioFlingerClient = new AudioFlingerDirectTrackClient(this);
+ mAudioFlinger->registerClient(mAudioFlingerClient);
+
+ allocateBufPool();
+ }
+ mDeathRecipient = new PMDeathRecipient(this);
+ acquireWakeLock();
+}
+
+AudioFlinger::DirectAudioTrack::~DirectAudioTrack() {
+ if (mFlag & AUDIO_OUTPUT_FLAG_LPA) {
+ requestAndWaitForEffectsThreadExit();
+ mAudioFlinger->deregisterClient(mAudioFlingerClient);
+ mAudioFlinger->deleteEffectSession();
+ deallocateBufPool();
+ }
+ AudioSystem::releaseOutput(mOutput);
+ releaseWakeLock();
+
+ {
+ Mutex::Autolock _l(pmLock);
+ if (mPowerManager != 0) {
+ sp<IBinder> binder = mPowerManager->asBinder();
+ binder->unlinkToDeath(mDeathRecipient);
+ }
+ }
+}
+
+status_t AudioFlinger::DirectAudioTrack::start() {
+ if(mIsPaused) {
+ mIsPaused = false;
+ mOutputDesc->stream->start(mOutputDesc->stream);
+ }
+ mOutputDesc->mActive = true;
+ AudioSystem::startOutput(mOutput, (audio_stream_type_t)mOutputDesc->mStreamType);
+ return NO_ERROR;
+}
+
+void AudioFlinger::DirectAudioTrack::stop() {
+ mOutputDesc->mActive = false;
+ mOutputDesc->stream->stop(mOutputDesc->stream);
+ AudioSystem::stopOutput(mOutput, (audio_stream_type_t)mOutputDesc->mStreamType);
+}
+
+void AudioFlinger::DirectAudioTrack::pause() {
+ if(!mIsPaused) {
+ mIsPaused = true;
+ mOutputDesc->stream->pause(mOutputDesc->stream);
+ mOutputDesc->mActive = false;
+ AudioSystem::stopOutput(mOutput, (audio_stream_type_t)mOutputDesc->mStreamType);
+ }
+}
+
+ssize_t AudioFlinger::DirectAudioTrack::write(const void *buffer, size_t size) {
+ ALOGV("Writing to AudioSessionOut");
+ int isAvail = 0;
+ mOutputDesc->stream->is_buffer_available(mOutputDesc->stream, &isAvail);
+ if (!isAvail) {
+ return 0;
+ }
+
+ if (mFlag & AUDIO_OUTPUT_FLAG_LPA) {
+ mEffectLock.lock();
+ List<BufferInfo>::iterator it = mEffectsPool.begin();
+ BufferInfo buf = *it;
+ mEffectsPool.erase(it);
+ memcpy((char *) buf.localBuf, (char *)buffer, size);
+ buf.bytesToWrite = size;
+ mEffectsPool.push_back(buf);
+ mAudioFlinger->applyEffectsOn(static_cast<void *>(this), (int16_t*)buf.localBuf,(int16_t*)buffer,(int)size);
+ mEffectLock.unlock();
+ }
+ return mOutputDesc->stream->write(mOutputDesc->stream, buffer, size);
+}
+
+void AudioFlinger::DirectAudioTrack::flush() {
+ if (mFlag & AUDIO_OUTPUT_FLAG_LPA) {
+ mEffectsPool.clear();
+ mEffectsPool = mBufPool;
+ }
+ mOutputDesc->stream->flush(mOutputDesc->stream);
+}
+
+void AudioFlinger::DirectAudioTrack::mute(bool muted) {
}
+void AudioFlinger::DirectAudioTrack::setVolume(float left, float right) {
+ mOutputDesc->mVolumeLeft = left;
+ mOutputDesc->mVolumeRight = right;
+}
+
+int64_t AudioFlinger::DirectAudioTrack::getTimeStamp() {
+ int64_t time;
+ mOutputDesc->stream->get_next_write_timestamp(mOutputDesc->stream, &time);
+ ALOGV("Timestamp %lld",time);
+ return time;
+}
+
+void AudioFlinger::DirectAudioTrack::postEOS(int64_t delayUs) {
+ ALOGV("Notify Audio Track of EOS event");
+ mClient->notify(DIRECT_TRACK_EOS);
+}
+
+void AudioFlinger::DirectAudioTrack::allocateBufPool() {
+ void *dsp_buf = NULL;
+ void *local_buf = NULL;
+
+ //1. get the ion buffer information
+ struct buf_info* buf = NULL;
+ mOutputDesc->stream->get_buffer_info(mOutputDesc->stream, &buf);
+ ALOGV("get buffer info %p",buf);
+ if (!buf) {
+ ALOGV("buffer is NULL");
+ return;
+ }
+ int nSize = buf->bufsize;
+ int bufferCount = buf->nBufs;
+
+ //2. allocate the buffer pool, allocate local buffers
+ for (int i = 0; i < bufferCount; i++) {
+ dsp_buf = (void *)buf->buffers[i];
+ local_buf = malloc(nSize);
+ memset(local_buf, 0, nSize);
+ // Store this information for internal mapping / maintanence
+ BufferInfo buf(local_buf, dsp_buf, nSize);
+ buf.bytesToWrite = 0;
+ mBufPool.push_back(buf);
+ mEffectsPool.push_back(buf);
+
+ ALOGV("The MEM that is allocated buffer is %x, size %d",(unsigned int)dsp_buf,nSize);
+ }
+ free(buf);
+}
+
+void AudioFlinger::DirectAudioTrack::deallocateBufPool() {
+
+ //1. Deallocate the local memory
+ //2. Remove all the buffers from bufpool
+ while (!mBufPool.empty()) {
+ List<BufferInfo>::iterator it = mBufPool.begin();
+ BufferInfo &memBuffer = *it;
+ // free the local buffer corresponding to mem buffer
+ if (memBuffer.localBuf) {
+ free(memBuffer.localBuf);
+ memBuffer.localBuf = NULL;
+ }
+ ALOGV("Removing from bufpool");
+ mBufPool.erase(it);
+ }
+}
+
+status_t AudioFlinger::DirectAudioTrack::onTransact(
+ uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+{
+ return BnDirectTrack::onTransact(code, data, reply, flags);
+}
+
+void *AudioFlinger::DirectAudioTrack::EffectsThreadWrapper(void *me) {
+ static_cast<DirectAudioTrack *>(me)->EffectsThreadEntry();
+ return NULL;
+}
+
+void AudioFlinger::DirectAudioTrack::EffectsThreadEntry() {
+ while(1) {
+ mEffectLock.lock();
+ if (!mEffectConfigChanged && !mKillEffectsThread) {
+ mEffectCv.wait(mEffectLock);
+ }
+
+ if(mKillEffectsThread) {
+ mEffectLock.unlock();
+ break;
+ }
+
+ if (mEffectConfigChanged) {
+ mEffectConfigChanged = false;
+ for ( List<BufferInfo>::iterator it = mEffectsPool.begin();
+ it != mEffectsPool.end(); it++) {
+ ALOGV("Apply effects on the buffer dspbuf %p, mEffectsPool.size() %d",it->dspBuf,mEffectsPool.size());
+ mAudioFlinger->applyEffectsOn(static_cast<void *>(this),
+ (int16_t *)it->localBuf,
+ (int16_t *)it->dspBuf,
+ it->bytesToWrite);
+ if (mEffectConfigChanged) {
+ break;
+ }
+ }
+
+ }
+ mEffectLock.unlock();
+ }
+ ALOGV("Effects thread is dead");
+ mEffectsThreadAlive = false;
+}
+
+void AudioFlinger::DirectAudioTrack::requestAndWaitForEffectsThreadExit() {
+ if (!mEffectsThreadAlive)
+ return;
+ mKillEffectsThread = true;
+ mEffectCv.signal();
+ pthread_join(mEffectsThread,NULL);
+ ALOGV("effects thread killed");
+}
+
+void AudioFlinger::DirectAudioTrack::createEffectThread() {
+ //Create the effects thread
+ pthread_attr_t attr;
+ pthread_attr_init(&attr);
+ pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+ mEffectsThreadAlive = true;
+ ALOGV("Creating Effects Thread");
+ pthread_create(&mEffectsThread, &attr, EffectsThreadWrapper, this);
+}
+AudioFlinger::DirectAudioTrack::AudioFlingerDirectTrackClient::AudioFlingerDirectTrackClient(void *obj)
+{
+ ALOGV("AudioFlinger::DirectAudioTrack::AudioFlingerDirectTrackClient");
+ pBaseClass = (DirectAudioTrack*)obj;
+}
+
+void AudioFlinger::DirectAudioTrack::AudioFlingerDirectTrackClient::binderDied(const wp<IBinder>& who) {
+ pBaseClass->mAudioFlinger.clear();
+ ALOGW("AudioFlinger server died!");
+}
+
+void AudioFlinger::DirectAudioTrack::AudioFlingerDirectTrackClient
+ ::ioConfigChanged(int event, audio_io_handle_t ioHandle, const void *param2) {
+ ALOGV("ioConfigChanged() event %d", event);
+ if (event == AudioSystem::EFFECT_CONFIG_CHANGED) {
+ ALOGV("Received notification for change in effect module");
+ // Seek to current media time - flush the decoded buffers with the driver
+ pBaseClass->mEffectConfigChanged = true;
+ // Signal effects thread to re-apply effects
+ ALOGV("Signalling Effects Thread");
+ pBaseClass->mEffectCv.signal();
+
+ }
+ ALOGV("ioConfigChanged Out");
+}
+
+void AudioFlinger::DirectAudioTrack::acquireWakeLock()
+{
+ Mutex::Autolock _l(pmLock);
+
+ if (mPowerManager == 0) {
+ // use checkService() to avoid blocking if power service is not up yet
+ sp<IBinder> binder =
+ defaultServiceManager()->checkService(String16("power"));
+ if (binder == 0) {
+ ALOGW("Thread %s cannot connect to the power manager service", lockName);
+ } else {
+ mPowerManager = interface_cast<IPowerManager>(binder);
+ binder->linkToDeath(mDeathRecipient);
+ }
+ }
+ if (mPowerManager != 0 && mWakeLockToken == 0) {
+ sp<IBinder> binder = new BBinder();
+ status_t status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK,
+ binder,
+ String16(lockName));
+ if (status == NO_ERROR) {
+ mWakeLockToken = binder;
+ }
+ ALOGV("acquireWakeLock() status %d", status);
+ }
+}
+
+void AudioFlinger::DirectAudioTrack::releaseWakeLock()
+{
+ Mutex::Autolock _l(pmLock);
+
+ if (mWakeLockToken != 0) {
+ ALOGV("releaseWakeLock()");
+ if (mPowerManager != 0) {
+ mPowerManager->releaseWakeLock(mWakeLockToken, 0);
+ }
+ mWakeLockToken.clear();
+ }
+}
+
+void AudioFlinger::DirectAudioTrack::clearPowerManager()
+{
+ releaseWakeLock();
+ Mutex::Autolock _l(pmLock);
+ mPowerManager.clear();
+}
+
+void AudioFlinger::DirectAudioTrack::PMDeathRecipient::binderDied(const wp<IBinder>& who)
+{
+ parentClass->clearPowerManager();
+ ALOGW("power manager service died !!!");
+}
+#endif
+
// ----------------------------------------------------------------------------
AudioFlinger::TrackHandle::TrackHandle(const sp<AudioFlinger::PlaybackThread::Track>& track)
@@ -5992,7 +6653,7 @@ AudioFlinger::RecordThread::RecordThread(const sp<AudioFlinger>& audioFlinger,
ThreadBase(audioFlinger, id, AUDIO_DEVICE_NONE, device, RECORD),
mInput(input), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpInBuffer(NULL),
// mRsmpInIndex and mInputBytes set by readInputParameters()
- mReqChannelCount(popcount(channelMask)),
+ mReqChannelCount(getInputChannelCount(channelMask)),
mReqSampleRate(sampleRate)
// mBytesRead is only meaningful while active, and so is cleared in start()
// (but might be better to also clear here for dump?)
@@ -6607,7 +7268,7 @@ bool AudioFlinger::RecordThread::checkForNewParameters_l()
reconfig = true;
}
if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) {
- reqChannelCount = popcount(value);
+ reqChannelCount = getInputChannelCount(value);
reconfig = true;
}
if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) {
@@ -6668,7 +7329,7 @@ bool AudioFlinger::RecordThread::checkForNewParameters_l()
reqFormat == mInput->stream->common.get_format(&mInput->stream->common) &&
reqFormat == AUDIO_FORMAT_PCM_16_BIT &&
((int)mInput->stream->common.get_sample_rate(&mInput->stream->common) <= (2 * reqSamplingRate)) &&
- popcount(mInput->stream->common.get_channels(&mInput->stream->common)) <= FCC_2 &&
+ getInputChannelCount(mInput->stream->common.get_channels(&mInput->stream->common)) <= FCC_2 &&
(reqChannelCount <= FCC_2)) {
status = NO_ERROR;
}
@@ -6739,7 +7400,7 @@ void AudioFlinger::RecordThread::readInputParameters()
mSampleRate = mInput->stream->common.get_sample_rate(&mInput->stream->common);
mChannelMask = mInput->stream->common.get_channels(&mInput->stream->common);
- mChannelCount = (uint16_t)popcount(mChannelMask);
+ mChannelCount = (uint16_t)getInputChannelCount(mChannelMask);
mFormat = mInput->stream->common.get_format(&mInput->stream->common);
mFrameSize = audio_stream_frame_size(&mInput->stream->common);
mInputBytes = mInput->stream->common.get_buffer_size(&mInput->stream->common);
@@ -6877,6 +7538,7 @@ audio_module_handle_t AudioFlinger::loadHwModule_l(const char *name)
{ // scope for auto-lock pattern
AutoMutex lock(mHardwareLock);
+#if !defined(ICS_AUDIO_BLOB) && !defined(MR0_AUDIO_BLOB)
if (0 == mAudioHwDevs.size()) {
mHardwareStatus = AUDIO_HW_GET_MASTER_VOLUME;
if (NULL != dev->get_master_volume) {
@@ -6894,6 +7556,7 @@ audio_module_handle_t AudioFlinger::loadHwModule_l(const char *name)
}
}
}
+#endif
mHardwareStatus = AUDIO_HW_SET_MASTER_VOLUME;
if ((NULL != dev->set_master_volume) &&
@@ -6902,12 +7565,14 @@ audio_module_handle_t AudioFlinger::loadHwModule_l(const char *name)
AudioHwDevice::AHWD_CAN_SET_MASTER_VOLUME);
}
+#if !defined(ICS_AUDIO_BLOB) && !defined(MR0_AUDIO_BLOB)
mHardwareStatus = AUDIO_HW_SET_MASTER_MUTE;
if ((NULL != dev->set_master_mute) &&
(OK == dev->set_master_mute(dev, mMasterMute))) {
flags = static_cast<AudioHwDevice::Flags>(flags |
AudioHwDevice::AHWD_CAN_SET_MASTER_MUTE);
}
+#endif
mHardwareStatus = AUDIO_HW_IDLE;
}
@@ -6981,12 +7646,23 @@ audio_io_handle_t AudioFlinger::openOutput(audio_module_handle_t module,
mHardwareStatus = AUDIO_HW_OUTPUT_OPEN;
+#ifndef ICS_AUDIO_BLOB
status = hwDevHal->open_output_stream(hwDevHal,
id,
*pDevices,
(audio_output_flags_t)flags,
&config,
&outStream);
+#else
+ status = hwDevHal->open_output_stream(hwDevHal,
+ *pDevices,
+ (int *)&config.format,
+ &config.channel_mask,
+ &config.sample_rate,
+ &outStream);
+ uint32_t newflags = flags | AUDIO_OUTPUT_FLAG_PRIMARY;
+ flags = (audio_output_flags_t)newflags;
+#endif
mHardwareStatus = AUDIO_HW_IDLE;
ALOGV("openOutput() openOutputStream returned output %p, SamplingRate %d, Format %d, Channels %x, status %d",
@@ -6998,7 +7674,18 @@ audio_io_handle_t AudioFlinger::openOutput(audio_module_handle_t module,
if (status == NO_ERROR && outStream != NULL) {
AudioStreamOut *output = new AudioStreamOut(outHwDev, outStream);
-
+#ifdef QCOM_HARDWARE
+ if (flags & AUDIO_OUTPUT_FLAG_LPA || flags & AUDIO_OUTPUT_FLAG_TUNNEL ) {
+ AudioSessionDescriptor *desc = new AudioSessionDescriptor(hwDevHal, outStream, flags);
+ desc->mActive = true;
+ //TODO: no stream type
+ //desc->mStreamType = streamType;
+ desc->mVolumeLeft = 1.0;
+ desc->mVolumeRight = 1.0;
+ desc->device = *pDevices;
+ mDirectAudioTracks.add(id, desc);
+ } else
+#endif
if ((flags & AUDIO_OUTPUT_FLAG_DIRECT) ||
(config.format != AUDIO_FORMAT_PCM_16_BIT) ||
(config.channel_mask != AUDIO_CHANNEL_OUT_STEREO)) {
@@ -7008,16 +7695,39 @@ audio_io_handle_t AudioFlinger::openOutput(audio_module_handle_t module,
thread = new MixerThread(this, output, id, *pDevices);
ALOGV("openOutput() created mixer output: ID %d thread %p", id, thread);
}
- mPlaybackThreads.add(id, thread);
+#ifdef QCOM_HARDWARE
+ if (thread != NULL)
+#endif
+ mPlaybackThreads.add(id, thread);
+
+#ifdef QCOM_HARDWARE
+ // if the device is a A2DP, then this is an A2DP Output
+ if ( true == audio_is_a2dp_device((audio_devices_t) *pDevices) )
+ {
+ mA2DPHandle = id;
+ ALOGV("A2DP device activated. The handle is set to %d", mA2DPHandle);
+ }
+#endif
if (pSamplingRate != NULL) *pSamplingRate = config.sample_rate;
if (pFormat != NULL) *pFormat = config.format;
if (pChannelMask != NULL) *pChannelMask = config.channel_mask;
- if (pLatencyMs != NULL) *pLatencyMs = thread->latency();
-
- // notify client processes of the new output creation
- thread->audioConfigChanged_l(AudioSystem::OUTPUT_OPENED);
-
+#ifdef QCOM_HARDWARE
+ if (thread != NULL) {
+#endif
+ if (pLatencyMs != NULL) *pLatencyMs = thread->latency();
+ // notify client processes of the new output creation
+ thread->audioConfigChanged_l(AudioSystem::OUTPUT_OPENED);
+#ifdef QCOM_HARDWARE
+ }
+ else {
+ *pLatencyMs = 0;
+ if ((flags & AUDIO_OUTPUT_FLAG_LPA) || (flags & AUDIO_OUTPUT_FLAG_TUNNEL)) {
+ AudioSessionDescriptor *desc = mDirectAudioTracks.valueFor(id);
+ *pLatencyMs = desc->stream->get_latency(desc->stream);
+ }
+ }
+#endif
// the first primary output opened designates the primary hw device
if ((mPrimaryHardwareDev == NULL) && (flags & AUDIO_OUTPUT_FLAG_PRIMARY)) {
ALOGI("Using module %d has the primary audio interface", module);
@@ -7064,6 +7774,21 @@ status_t AudioFlinger::closeOutput_nonvirtual(audio_io_handle_t output)
{
// keep strong reference on the playback thread so that
// it is not destroyed while exit() is executed
+#ifdef QCOM_HARDWARE
+ AudioSessionDescriptor *desc = mDirectAudioTracks.valueFor(output);
+ if (desc) {
+ ALOGV("Closing DirectTrack output %d", output);
+ desc->mActive = false;
+ desc->stream->common.standby(&desc->stream->common);
+ desc->hwDev->close_output_stream(desc->hwDev, desc->stream);
+ desc->trackRefPtr = NULL;
+ mDirectAudioTracks.removeItem(output);
+ audioConfigChanged_l(AudioSystem::OUTPUT_CLOSED, output, NULL);
+ delete desc;
+ return NO_ERROR;
+ }
+#endif
+
sp<PlaybackThread> thread;
{
Mutex::Autolock _l(mLock);
@@ -7084,6 +7809,14 @@ status_t AudioFlinger::closeOutput_nonvirtual(audio_io_handle_t output)
}
audioConfigChanged_l(AudioSystem::OUTPUT_CLOSED, output, NULL);
mPlaybackThreads.removeItem(output);
+#ifdef QCOM_HARDWARE
+ if (mA2DPHandle == output)
+ {
+ mA2DPHandle = -1;
+ ALOGV("A2DP OutputClosed Notifying Client");
+ audioConfigChanged_l(AudioSystem::A2DP_OUTPUT_STATE, mA2DPHandle, &mA2DPHandle);
+ }
+#endif
}
thread->exit();
// The thread entity (active unit of execution) is no longer running here,
@@ -7162,8 +7895,16 @@ audio_io_handle_t AudioFlinger::openInput(audio_module_handle_t module,
audio_hw_device_t *inHwHal = inHwDev->hwDevice();
audio_io_handle_t id = nextUniqueId();
+#ifndef ICS_AUDIO_BLOB
status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config,
&inStream);
+#else
+ status = inHwHal->open_input_stream(inHwHal, *pDevices,
+ (int *)&config.format,
+ &config.channel_mask,
+ &config.sample_rate, (audio_in_acoustics_t)0,
+ &inStream);
+#endif
ALOGV("openInput() openInputStream returned input %p, SamplingRate %d, Format %d, Channels %x, status %d",
inStream,
config.sample_rate,
@@ -7177,10 +7918,18 @@ audio_io_handle_t AudioFlinger::openInput(audio_module_handle_t module,
if (status == BAD_VALUE &&
reqFormat == config.format && config.format == AUDIO_FORMAT_PCM_16_BIT &&
(config.sample_rate <= 2 * reqSamplingRate) &&
- (popcount(config.channel_mask) <= FCC_2) && (popcount(reqChannels) <= FCC_2)) {
+ (getInputChannelCount(config.channel_mask) <= FCC_2) && (getInputChannelCount(reqChannels) <= FCC_2)) {
ALOGV("openInput() reopening with proposed sampling rate and channel mask");
inStream = NULL;
+#ifndef ICS_AUDIO_BLOB
status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config, &inStream);
+#else
+ status = inHwHal->open_input_stream(inHwHal, *pDevices,
+ (int *)&config.format,
+ &config.channel_mask,
+ &config.sample_rate, (audio_in_acoustics_t)0,
+ &inStream);
+#endif
}
if (status == NO_ERROR && inStream != NULL) {
@@ -7253,6 +8002,12 @@ status_t AudioFlinger::setStreamOutput(audio_stream_type_t stream, audio_io_hand
PlaybackThread *thread = mPlaybackThreads.valueAt(i).get();
thread->invalidateTracks(stream);
}
+#ifdef QCOM_HARDWARE
+ if ( mA2DPHandle == output ) {
+ ALOGV("A2DP Activated and hence notifying the client");
+ audioConfigChanged_l(AudioSystem::A2DP_OUTPUT_STATE, mA2DPHandle, &output);
+ }
+#endif
return NO_ERROR;
}
@@ -7792,6 +8547,21 @@ sp<AudioFlinger::EffectHandle> AudioFlinger::ThreadBase::createEffect_l(
addEffectChain_l(chain);
chain->setStrategy(getStrategyForSession_l(sessionId));
chainCreated = true;
+#ifdef QCOM_HARDWARE
+ if(sessionId == mAudioFlinger->mLPASessionId) {
+ // Clear reference to previous effect chain if any
+ if(mAudioFlinger->mLPAEffectChain.get()) {
+ mAudioFlinger->mLPAEffectChain.clear();
+ }
+ ALOGV("New EffectChain is created for LPA session ID %d", sessionId);
+ mAudioFlinger->mLPAEffectChain = chain;
+ chain->setLPAFlag(true);
+ // For LPA, the volume will be applied in DSP. No need for volume
+ // control in the Effect chain, so setting it to unity.
+ uint32_t volume = 0x1000000; // Equals to 1.0 in 8.24 format
+ chain->setVolume_l(&volume,&volume);
+ }
+#endif
} else {
effect = chain->getEffectFromDesc_l(desc);
}
@@ -7822,6 +8592,11 @@ sp<AudioFlinger::EffectHandle> AudioFlinger::ThreadBase::createEffect_l(
effect->setDevice(mInDevice);
effect->setMode(mAudioFlinger->getMode());
effect->setAudioSource(mAudioSource);
+#ifdef QCOM_HARDWARE
+ if(chain == mAudioFlinger->mLPAEffectChain) {
+ effect->setLPAFlag(true);
+ }
+#endif
}
// create effect handle and connect it to effect module
handle = new EffectHandle(effect, client, effectClient, priority);
@@ -7928,7 +8703,10 @@ void AudioFlinger::ThreadBase::lockEffectChains_l(
{
effectChains = mEffectChains;
for (size_t i = 0; i < mEffectChains.size(); i++) {
- mEffectChains[i]->lock();
+#ifdef QCOM_HARDWARE
+ if (mEffectChains[i] != mAudioFlinger->mLPAEffectChain)
+#endif
+ mEffectChains[i]->lock();
}
}
@@ -7936,7 +8714,10 @@ void AudioFlinger::ThreadBase::unlockEffectChains(
const Vector< sp<AudioFlinger::EffectChain> >& effectChains)
{
for (size_t i = 0; i < effectChains.size(); i++) {
- effectChains[i]->unlock();
+#ifdef QCOM_HARDWARE
+ if (mEffectChains[i] != mAudioFlinger->mLPAEffectChain)
+#endif
+ effectChains[i]->unlock();
}
}
@@ -8168,6 +8949,9 @@ AudioFlinger::EffectModule::EffectModule(ThreadBase *thread,
// mMaxDisableWaitCnt is set by configure() and not used before then
// mDisableWaitCnt is set by process() and updateState() and not used before then
mSuspended(false)
+#ifdef QCOM_HARDWARE
+ ,mIsForLPA(false)
+#endif
{
ALOGV("Constructor %p", this);
int lStatus;
@@ -8293,6 +9077,9 @@ AudioFlinger::EffectHandle *AudioFlinger::EffectModule::controlHandle_l()
size_t AudioFlinger::EffectModule::disconnect(EffectHandle *handle, bool unpinIfLast)
{
+#ifdef QCOM_HARDWARE
+ setEnabled(false);
+#endif
ALOGV("disconnect() %p handle %p", this, handle);
// keep a strong reference on this EffectModule to avoid calling the
// destructor before we exit
@@ -8399,8 +9186,19 @@ void AudioFlinger::EffectModule::reset_l()
(*mEffectInterface)->command(mEffectInterface, EFFECT_CMD_RESET, 0, NULL, 0, NULL);
}
+#ifndef QCOM_HARDWARE
status_t AudioFlinger::EffectModule::configure()
{
+#else
+status_t AudioFlinger::EffectModule::configure(bool isForLPA, int sampleRate, int channelCount, int frameCount)
+{
+ uint32_t channels;
+
+ // Acquire lock here to make sure that any other thread does not delete
+ // the effect handle and release the effect module.
+ Mutex::Autolock _l(mLock);
+#endif
+
if (mEffectInterface == NULL) {
return NO_INIT;
}
@@ -8412,6 +9210,23 @@ status_t AudioFlinger::EffectModule::configure()
// TODO: handle configuration of effects replacing track process
audio_channel_mask_t channelMask = thread->channelMask();
+#ifdef QCOM_HARDWARE
+ mIsForLPA = isForLPA;
+ if(isForLPA) {
+ if (channelCount == 1) {
+ channels = AUDIO_CHANNEL_OUT_MONO;
+ } else {
+ channels = AUDIO_CHANNEL_OUT_STEREO;
+ }
+ ALOGV("%s: LPA ON - channels %d", __func__, channels);
+ } else {
+ if (thread->channelCount() == 1) {
+ channels = AUDIO_CHANNEL_OUT_MONO;
+ } else {
+ channels = AUDIO_CHANNEL_OUT_STEREO;
+ }
+ }
+#endif
if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
mConfig.inputCfg.channels = AUDIO_CHANNEL_OUT_MONO;
@@ -8421,7 +9236,13 @@ status_t AudioFlinger::EffectModule::configure()
mConfig.outputCfg.channels = channelMask;
mConfig.inputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
mConfig.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
- mConfig.inputCfg.samplingRate = thread->sampleRate();
+#ifdef QCOM_HARDWARE
+ if(isForLPA){
+ mConfig.inputCfg.samplingRate = sampleRate;
+ ALOGV("%s: LPA ON - sampleRate %d", __func__, sampleRate);
+ } else
+#endif
+ mConfig.inputCfg.samplingRate = thread->sampleRate();
mConfig.outputCfg.samplingRate = mConfig.inputCfg.samplingRate;
mConfig.inputCfg.bufferProvider.cookie = NULL;
mConfig.inputCfg.bufferProvider.getBuffer = NULL;
@@ -8446,7 +9267,13 @@ status_t AudioFlinger::EffectModule::configure()
}
mConfig.inputCfg.mask = EFFECT_CONFIG_ALL;
mConfig.outputCfg.mask = EFFECT_CONFIG_ALL;
- mConfig.inputCfg.buffer.frameCount = thread->frameCount();
+#ifdef QCOM_HARDWARE
+ if(isForLPA) {
+ mConfig.inputCfg.buffer.frameCount = frameCount;
+ ALOGV("%s: LPA ON - frameCount %d", __func__, frameCount);
+ } else
+#endif
+ mConfig.inputCfg.buffer.frameCount = thread->frameCount();
mConfig.outputCfg.buffer.frameCount = mConfig.inputCfg.buffer.frameCount;
ALOGV("configure() %p thread %p buffer %p framecount %d",
@@ -8626,10 +9453,15 @@ status_t AudioFlinger::EffectModule::setEnabled(bool enabled)
// must be called with EffectModule::mLock held
status_t AudioFlinger::EffectModule::setEnabled_l(bool enabled)
{
-
+#ifdef QCOM_HARDWARE
+ bool effectStateChanged = false;
+#endif
ALOGV("setEnabled %p enabled %d", this, enabled);
if (enabled != isEnabled()) {
+#ifdef QCOM_HARDWARE
+ effectStateChanged = true;
+#endif
status_t status = AudioSystem::setEffectEnabled(mId, enabled);
if (enabled && status != NO_ERROR) {
return status;
@@ -8667,6 +9499,16 @@ status_t AudioFlinger::EffectModule::setEnabled_l(bool enabled)
}
}
}
+#ifdef QCOM_HARDWARE
+ /*
+ Send notification event to LPA Player when an effect for
+ LPA output is enabled or disabled.
+ */
+ if (effectStateChanged && mIsForLPA) {
+        sp<ThreadBase> thread = mThread.promote();
+        if (thread != 0) thread->effectConfigChanged();
+ }
+#endif
return NO_ERROR;
}
@@ -9116,6 +9958,18 @@ status_t AudioFlinger::EffectHandle::command(uint32_t cmdCode,
return disable();
}
+#ifdef QCOM_HARDWARE
+ ALOGV("EffectHandle::command: isOnLPA %d", mEffect->isOnLPA());
+ if(mEffect->isOnLPA() &&
+ ((cmdCode == EFFECT_CMD_SET_PARAM) || (cmdCode == EFFECT_CMD_SET_PARAM_DEFERRED) ||
+ (cmdCode == EFFECT_CMD_SET_PARAM_COMMIT) || (cmdCode == EFFECT_CMD_SET_DEVICE) ||
+ (cmdCode == EFFECT_CMD_SET_VOLUME) || (cmdCode == EFFECT_CMD_SET_AUDIO_MODE)) ) {
+ // Notify Direct track for the change in Effect module
+ // TODO: check if it is required to send mLPAHandle
+ ALOGV("Notifying Direct Track for the change in effect config");
+ mClient->audioFlinger()->audioConfigChanged_l(AudioSystem::EFFECT_CONFIG_CHANGED, 0, NULL);
+ }
+#endif
return mEffect->command(cmdCode, cmdSize, pCmdData, replySize, pReplyData);
}
@@ -9184,6 +10038,9 @@ AudioFlinger::EffectChain::EffectChain(ThreadBase *thread,
: mThread(thread), mSessionId(sessionId), mActiveTrackCnt(0), mTrackCnt(0), mTailBufferCount(0),
mOwnInBuffer(false), mVolumeCtrlIdx(-1), mLeftVolume(UINT_MAX), mRightVolume(UINT_MAX),
mNewLeftVolume(UINT_MAX), mNewRightVolume(UINT_MAX)
+#ifdef QCOM_HARDWARE
+ ,mIsForLPATrack(false)
+#endif
{
mStrategy = AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC);
if (thread == NULL) {
@@ -9228,6 +10085,20 @@ sp<AudioFlinger::EffectModule> AudioFlinger::EffectChain::getEffectFromId_l(int
return 0;
}
+#ifdef QCOM_HARDWARE
+sp<AudioFlinger::EffectModule> AudioFlinger::EffectChain::getEffectFromIndex_l(int idx)
+{
+ sp<EffectModule> effect = NULL;
+    if(idx < 0 || idx >= (int)mEffects.size()) {
+        ALOGE("EffectChain::getEffectFromIndex_l: invalid index %d", idx);
+        return effect;
+    }
+    if(mEffects.size() > 0)
+        effect = mEffects[idx];
+ return effect;
+}
+#endif
+
// getEffectFromType_l() must be called with ThreadBase::mLock held
sp<AudioFlinger::EffectModule> AudioFlinger::EffectChain::getEffectFromType_l(
const effect_uuid_t *type)
@@ -9294,7 +10165,11 @@ void AudioFlinger::EffectChain::process_l()
}
size_t size = mEffects.size();
+#ifdef QCOM_HARDWARE
+ if (doProcess || isForLPATrack()) {
+#else
if (doProcess) {
+#endif
for (size_t i = 0; i < size; i++) {
mEffects[i]->process();
}
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 49e2b2c..dcdc55c 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -1,6 +1,9 @@
/*
**
** Copyright 2007, The Android Open Source Project
+** Copyright (c) 2012, The Linux Foundation. All rights reserved.
+** Not a Contribution, Apache license notifications and license are retained
+** for attribution purposes only.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
@@ -26,6 +29,10 @@
#include <media/IAudioFlinger.h>
#include <media/IAudioFlingerClient.h>
+#ifdef QCOM_HARDWARE
+#include <media/IDirectTrack.h>
+#include <media/IDirectTrackClient.h>
+#endif
#include <media/IAudioTrack.h>
#include <media/IAudioRecord.h>
#include <media/AudioSystem.h>
@@ -52,6 +59,7 @@
#include "AudioWatchdog.h"
#include <powermanager/IPowerManager.h>
+#include <utils/List.h>
namespace android {
@@ -99,6 +107,19 @@ public:
pid_t tid,
int *sessionId,
status_t *status);
+#ifdef QCOM_HARDWARE
+ virtual sp<IDirectTrack> createDirectTrack(
+ pid_t pid,
+ uint32_t sampleRate,
+ audio_channel_mask_t channelMask,
+ audio_io_handle_t output,
+ int *sessionId,
+ IDirectTrackClient* client,
+ audio_stream_type_t streamType,
+ status_t *status);
+
+ virtual void deleteEffectSession();
+#endif
virtual sp<IAudioRecord> openRecord(
pid_t pid,
@@ -141,7 +162,9 @@ public:
virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys) const;
virtual void registerClient(const sp<IAudioFlingerClient>& client);
-
+#ifdef QCOM_HARDWARE
+ virtual status_t deregisterClient(const sp<IAudioFlingerClient>& client);
+#endif
virtual size_t getInputBufferSize(uint32_t sampleRate, audio_format_t format,
audio_channel_mask_t channelMask) const;
@@ -216,6 +239,13 @@ public:
Parcel* reply,
uint32_t flags);
+#ifdef QCOM_HARDWARE
+ void applyEffectsOn(void *token,
+ int16_t *buffer1,
+ int16_t *buffer2,
+ int size);
+#endif
+
// end of IAudioFlinger interface
class SyncEvent;
@@ -314,7 +344,7 @@ private:
public:
NotificationClient(const sp<AudioFlinger>& audioFlinger,
const sp<IAudioFlingerClient>& client,
- pid_t pid);
+ sp<IBinder> binder);
virtual ~NotificationClient();
sp<IAudioFlingerClient> audioFlingerClient() const { return mAudioFlingerClient; }
@@ -327,7 +357,7 @@ private:
NotificationClient& operator = (const NotificationClient&);
const sp<AudioFlinger> mAudioFlinger;
- const pid_t mPid;
+ sp<IBinder> mBinder;
const sp<IAudioFlingerClient> mAudioFlingerClient;
};
@@ -343,6 +373,9 @@ private:
class EffectModule;
class EffectHandle;
class EffectChain;
+#ifdef QCOM_HARDWARE
+ struct AudioSessionDescriptor;
+#endif
struct AudioStreamOut;
struct AudioStreamIn;
@@ -570,6 +603,9 @@ private:
virtual status_t setParameters(const String8& keyValuePairs);
virtual String8 getParameters(const String8& keys) = 0;
virtual void audioConfigChanged_l(int event, int param = 0) = 0;
+#ifdef QCOM_HARDWARE
+ void effectConfigChanged();
+#endif
void sendIoConfigEvent(int event, int param = 0);
void sendIoConfigEvent_l(int event, int param = 0);
void sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio);
@@ -1400,6 +1436,115 @@ private:
sp<PlaybackThread> getEffectThread_l(int sessionId, int EffectId);
// server side of the client's IAudioTrack
+#ifdef QCOM_HARDWARE
+ class DirectAudioTrack : public android::BnDirectTrack,
+ public AudioEventObserver
+ {
+ public:
+ DirectAudioTrack(const sp<AudioFlinger>& audioFlinger,
+ int output, AudioSessionDescriptor *outputDesc,
+ IDirectTrackClient* client, audio_output_flags_t outflag);
+ virtual ~DirectAudioTrack();
+ virtual status_t start();
+ virtual void stop();
+ virtual void flush();
+ virtual void mute(bool);
+ virtual void pause();
+ virtual ssize_t write(const void *buffer, size_t bytes);
+ virtual void setVolume(float left, float right);
+ virtual int64_t getTimeStamp();
+ virtual void postEOS(int64_t delayUs);
+
+ virtual status_t onTransact(
+ uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags);
+ private:
+
+ IDirectTrackClient* mClient;
+ AudioSessionDescriptor *mOutputDesc;
+ int mOutput;
+ bool mIsPaused;
+ audio_output_flags_t mFlag;
+
+ class BufferInfo {
+ public:
+ BufferInfo(void *buf1, void *buf2, int32_t nSize) :
+ localBuf(buf1), dspBuf(buf2), memBufsize(nSize)
+ {}
+
+ void *localBuf;
+ void *dspBuf;
+ uint32_t memBufsize;
+ uint32_t bytesToWrite;
+ };
+ List<BufferInfo> mBufPool;
+ List<BufferInfo> mEffectsPool;
+
+ void allocateBufPool();
+ void deallocateBufPool();
+
+ //******Effects*************
+ static void *EffectsThreadWrapper(void *me);
+ void EffectsThreadEntry();
+ // make sure the Effects thread also exited
+ void requestAndWaitForEffectsThreadExit();
+ void createEffectThread();
+ Condition mEffectCv;
+ Mutex mEffectLock;
+ pthread_t mEffectsThread;
+ bool mKillEffectsThread;
+ bool mEffectsThreadAlive;
+ bool mEffectConfigChanged;
+
+ //Structure to recieve the Effect notification from the flinger.
+ class AudioFlingerDirectTrackClient: public IBinder::DeathRecipient, public BnAudioFlingerClient {
+ public:
+ AudioFlingerDirectTrackClient(void *obj);
+
+ DirectAudioTrack *pBaseClass;
+ // DeathRecipient
+ virtual void binderDied(const wp<IBinder>& who);
+
+ // IAudioFlingerClient
+
+ // indicate a change in the configuration of an output or input: keeps the cached
+ // values for output/input parameters upto date in client process
+ virtual void ioConfigChanged(int event, audio_io_handle_t ioHandle, const void *param2);
+
+ friend class DirectAudioTrack;
+ };
+ // helper function to obtain AudioFlinger service handle
+ sp<AudioFlinger> mAudioFlinger;
+ sp<AudioFlingerDirectTrackClient> mAudioFlingerClient;
+
+ void clearPowerManager();
+ class PMDeathRecipient : public IBinder::DeathRecipient {
+ public:
+ PMDeathRecipient(void *obj){parentClass = (DirectAudioTrack *)obj;}
+ virtual ~PMDeathRecipient() {}
+
+ // IBinder::DeathRecipient
+ virtual void binderDied(const wp<IBinder>& who);
+
+ private:
+ DirectAudioTrack *parentClass;
+ PMDeathRecipient(const PMDeathRecipient&);
+ PMDeathRecipient& operator = (const PMDeathRecipient&);
+
+ friend class DirectAudioTrack;
+ };
+
+ friend class PMDeathRecipient;
+
+ Mutex pmLock;
+ void acquireWakeLock();
+ void releaseWakeLock();
+
+ sp<IPowerManager> mPowerManager;
+ sp<IBinder> mWakeLockToken;
+ sp<PMDeathRecipient> mDeathRecipient;
+ };
+#endif
+
class TrackHandle : public android::BnAudioTrack {
public:
TrackHandle(const sp<PlaybackThread::Track>& track);
@@ -1424,7 +1569,7 @@ private:
};
void removeClient_l(pid_t pid);
- void removeNotificationClient(pid_t pid);
+ void removeNotificationClient(sp<IBinder> binder);
// record thread
@@ -1636,7 +1781,14 @@ private:
void *pReplyData);
void reset_l();
+#ifdef QCOM_HARDWARE
+ status_t configure(bool isForLPA = false,
+ int sampleRate = 0,
+ int channelCount = 0,
+ int frameCount = 0);
+#else
status_t configure();
+#endif
status_t init();
effect_state state() const {
return mState;
@@ -1683,7 +1835,10 @@ private:
bool purgeHandles();
void lock() { mLock.lock(); }
void unlock() { mLock.unlock(); }
-
+#ifdef QCOM_HARDWARE
+ bool isOnLPA() { return mIsForLPA;}
+ void setLPAFlag(bool isForLPA) {mIsForLPA = isForLPA; }
+#endif
void dump(int fd, const Vector<String16>& args);
protected:
@@ -1715,6 +1870,9 @@ mutable Mutex mLock; // mutex for process, commands and handl
// sending disable command.
uint32_t mDisableWaitCnt; // current process() calls count during disable period.
bool mSuspended; // effect is suspended: temporarily disabled by framework
+#ifdef QCOM_HARDWARE
+ bool mIsForLPA;
+#endif
};
// The EffectHandle class implements the IEffect interface. It provides resources
@@ -1823,12 +1981,18 @@ mutable Mutex mLock; // mutex for process, commands and handl
status_t addEffect_l(const sp<EffectModule>& handle);
size_t removeEffect_l(const sp<EffectModule>& handle);
+#ifdef QCOM_HARDWARE
+ size_t getNumEffects() { return mEffects.size(); }
+#endif
int sessionId() const { return mSessionId; }
void setSessionId(int sessionId) { mSessionId = sessionId; }
sp<EffectModule> getEffectFromDesc_l(effect_descriptor_t *descriptor);
sp<EffectModule> getEffectFromId_l(int id);
+#ifdef QCOM_HARDWARE
+ sp<EffectModule> getEffectFromIndex_l(int idx);
+#endif
sp<EffectModule> getEffectFromType_l(const effect_uuid_t *type);
bool setVolume_l(uint32_t *left, uint32_t *right);
void setDevice_l(audio_devices_t device);
@@ -1874,6 +2038,10 @@ mutable Mutex mLock; // mutex for process, commands and handl
void clearInputBuffer();
void dump(int fd, const Vector<String16>& args);
+#ifdef QCOM_HARDWARE
+ bool isForLPATrack() {return mIsForLPATrack; }
+ void setLPAFlag(bool flag) {mIsForLPATrack = flag;}
+#endif
protected:
friend class AudioFlinger; // for mThread, mEffects
@@ -1922,6 +2090,9 @@ mutable Mutex mLock; // mutex for process, commands and handl
uint32_t mNewLeftVolume; // new volume on left channel
uint32_t mNewRightVolume; // new volume on right channel
uint32_t mStrategy; // strategy for this effect chain
+#ifdef QCOM_HARDWARE
+ bool mIsForLPATrack;
+#endif
// mSuspendedEffects lists all effects currently suspended in the chain.
// Use effect type UUID timelow field as key. There is no real risk of identical
// timeLow fields among effect type UUIDs.
@@ -1983,7 +2154,21 @@ mutable Mutex mLock; // mutex for process, commands and handl
AudioStreamIn(AudioHwDevice *dev, audio_stream_in_t *in) :
audioHwDev(dev), stream(in) {}
};
-
+#ifdef QCOM_HARDWARE
+ struct AudioSessionDescriptor {
+ bool mActive;
+ int mStreamType;
+ float mVolumeLeft;
+ float mVolumeRight;
+ audio_hw_device_t *hwDev;
+ audio_stream_out_t *stream;
+ audio_output_flags_t flag;
+ void *trackRefPtr;
+ audio_devices_t device;
+ AudioSessionDescriptor(audio_hw_device_t *dev, audio_stream_out_t *out, audio_output_flags_t outflag) :
+            mActive(false), mStreamType(0), mVolumeLeft(0.0f), mVolumeRight(0.0f), hwDev(dev), stream(out), flag(outflag), trackRefPtr(NULL), device((audio_devices_t)0) {}
+ };
+#endif
// for mAudioSessionRefs only
struct AudioSessionRef {
AudioSessionRef(int sessionid, pid_t pid) :
@@ -2043,14 +2228,26 @@ mutable Mutex mLock; // mutex for process, commands and handl
DefaultKeyedVector< audio_io_handle_t, sp<RecordThread> > mRecordThreads;
- DefaultKeyedVector< pid_t, sp<NotificationClient> > mNotificationClients;
+ DefaultKeyedVector< sp<IBinder>, sp<NotificationClient> > mNotificationClients;
volatile int32_t mNextUniqueId; // updated by android_atomic_inc
audio_mode_t mMode;
bool mBtNrecIsOff;
-
+#ifdef QCOM_HARDWARE
+ DefaultKeyedVector<audio_io_handle_t, AudioSessionDescriptor *> mDirectAudioTracks;
+ int mA2DPHandle; // Handle to notify A2DP connection status
+#endif
// protected by mLock
+#ifdef QCOM_HARDWARE
+ volatile bool mIsEffectConfigChanged;
+#endif
Vector<AudioSessionRef*> mAudioSessionRefs;
-
+#ifdef QCOM_HARDWARE
+ sp<EffectChain> mLPAEffectChain;
+ int mLPASessionId;
+ int mLPASampleRate;
+ int mLPANumChannels;
+ volatile bool mAllChainsLocked;
+#endif
float masterVolume_l() const;
bool masterMute_l() const;
audio_module_handle_t loadHwModule_l(const char *name);
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index 1e4049a..ab6f38f 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -549,7 +549,11 @@ bool AudioMixer::track_t::setResampler(uint32_t value, uint32_t devSampleRate)
(value == 48000 && devSampleRate == 44100))) {
quality = AudioResampler::LOW_QUALITY;
} else {
+#ifdef QCOM_ENHANCED_AUDIO
+ quality = AudioResampler::VERY_HIGH_QUALITY;
+#else
quality = AudioResampler::DEFAULT_QUALITY;
+#endif
}
resampler = AudioResampler::create(
format,
diff --git a/services/audioflinger/AudioPolicyService.cpp b/services/audioflinger/AudioPolicyService.cpp
index 8b99bd2..7dd46f2 100644
--- a/services/audioflinger/AudioPolicyService.cpp
+++ b/services/audioflinger/AudioPolicyService.cpp
@@ -388,12 +388,16 @@ status_t AudioPolicyService::setStreamVolumeIndex(audio_stream_type_t stream,
return BAD_VALUE;
}
Mutex::Autolock _l(mLock);
+
+#ifndef ICS_AUDIO_BLOB
if (mpAudioPolicy->set_stream_volume_index_for_device) {
return mpAudioPolicy->set_stream_volume_index_for_device(mpAudioPolicy,
stream,
index,
device);
- } else {
+ } else
+#endif
+ {
return mpAudioPolicy->set_stream_volume_index(mpAudioPolicy, stream, index);
}
}
@@ -409,12 +413,15 @@ status_t AudioPolicyService::getStreamVolumeIndex(audio_stream_type_t stream,
return BAD_VALUE;
}
Mutex::Autolock _l(mLock);
+#ifndef ICS_AUDIO_BLOB
if (mpAudioPolicy->get_stream_volume_index_for_device) {
return mpAudioPolicy->get_stream_volume_index_for_device(mpAudioPolicy,
stream,
index,
device);
- } else {
+ } else
+#endif
+ {
return mpAudioPolicy->get_stream_volume_index(mpAudioPolicy, stream, index);
}
}
diff --git a/services/camera/libcameraservice/Camera2Client.cpp b/services/camera/libcameraservice/Camera2Client.cpp
index 0f1e650..bddcb8c 100644
--- a/services/camera/libcameraservice/Camera2Client.cpp
+++ b/services/camera/libcameraservice/Camera2Client.cpp
@@ -1430,11 +1430,13 @@ void Camera2Client::notifyAutoFocus(uint8_t newState, int triggerId) {
}
}
if (sendMovingMessage) {
+#ifndef OMAP_ICS_CAMERA
SharedCameraClient::Lock l(mSharedCameraClient);
if (l.mCameraClient != 0) {
l.mCameraClient->notifyCallback(CAMERA_MSG_FOCUS_MOVE,
afInMotion ? 1 : 0, 0);
}
+#endif
}
if (sendCompletedMessage) {
SharedCameraClient::Lock l(mSharedCameraClient);
diff --git a/services/camera/libcameraservice/CameraClient.cpp b/services/camera/libcameraservice/CameraClient.cpp
index b930c02..1cdb938 100644
--- a/services/camera/libcameraservice/CameraClient.cpp
+++ b/services/camera/libcameraservice/CameraClient.cpp
@@ -83,8 +83,14 @@ status_t CameraClient::initialize(camera_module_t *module) {
(void *)mCameraId);
// Enable zoom, error, focus, and metadata messages by default
- enableMsgType(CAMERA_MSG_ERROR | CAMERA_MSG_ZOOM | CAMERA_MSG_FOCUS |
- CAMERA_MSG_PREVIEW_METADATA | CAMERA_MSG_FOCUS_MOVE);
+ enableMsgType(CAMERA_MSG_ERROR | CAMERA_MSG_ZOOM | CAMERA_MSG_FOCUS
+#ifndef QCOM_HARDWARE
+ | CAMERA_MSG_PREVIEW_METADATA
+#endif
+#ifndef OMAP_ICS_CAMERA
+ | CAMERA_MSG_FOCUS_MOVE
+#endif
+ );
LOG1("CameraClient::initialize X (pid %d, id %d)", callingPid, mCameraId);
return OK;
@@ -244,9 +250,14 @@ void CameraClient::disconnect() {
// Release the held ANativeWindow resources.
if (mPreviewWindow != 0) {
+#ifdef QCOM_HARDWARE
+ mHardware->setPreviewWindow(0);
+#endif
disconnectWindow(mPreviewWindow);
mPreviewWindow = 0;
+#ifndef QCOM_HARDWARE
mHardware->setPreviewWindow(mPreviewWindow);
+#endif
}
mHardware.clear();
@@ -285,6 +296,10 @@ status_t CameraClient::setPreviewWindow(const sp<IBinder>& binder,
native_window_set_buffers_transform(window.get(), mOrientation);
result = mHardware->setPreviewWindow(window);
}
+#ifdef QCOM_HARDWARE
+ } else {
+ result = mHardware->setPreviewWindow(window);
+#endif
}
if (result == NO_ERROR) {
@@ -344,6 +359,9 @@ void CameraClient::setPreviewCallbackFlag(int callback_flag) {
// start preview mode
status_t CameraClient::startPreview() {
LOG1("startPreview (pid %d)", getCallingPid());
+#ifdef QCOM_HARDWARE
+ enableMsgType(CAMERA_MSG_PREVIEW_METADATA);
+#endif
return startCameraMode(CAMERA_PREVIEW_MODE);
}
@@ -393,6 +411,11 @@ status_t CameraClient::startPreviewMode() {
native_window_set_buffers_transform(mPreviewWindow.get(),
mOrientation);
}
+
+#if defined(OMAP_ICS_CAMERA) || defined(OMAP_ENHANCEMENT_BURST_CAPTURE)
+ disableMsgType(CAMERA_MSG_COMPRESSED_BURST_IMAGE);
+#endif
+
mHardware->setPreviewWindow(mPreviewWindow);
result = mHardware->startPreview();
@@ -429,9 +452,22 @@ status_t CameraClient::startRecordingMode() {
// stop preview mode
void CameraClient::stopPreview() {
LOG1("stopPreview (pid %d)", getCallingPid());
+#ifdef QCOM_HARDWARE
+ disableMsgType(CAMERA_MSG_PREVIEW_METADATA);
+#endif
Mutex::Autolock lock(mLock);
if (checkPidAndHardware() != NO_ERROR) return;
+#ifdef OMAP_ENHANCEMENT
+ // According to framework documentation, preview needs
+ // to be started for image capture. This will make sure
+ // that image capture related messages get disabled if
+ // not done already in their respective handlers.
+ // If these messages come when in the midddle of
+ // stopping preview we will deadlock the system in
+ // lockIfMessageWanted().
+ disableMsgType(CAMERA_MSG_POSTVIEW_FRAME);
+#endif
disableMsgType(CAMERA_MSG_PREVIEW_FRAME);
mHardware->stopPreview();
@@ -527,8 +563,18 @@ status_t CameraClient::takePicture(int msgType) {
CAMERA_MSG_POSTVIEW_FRAME |
CAMERA_MSG_RAW_IMAGE |
CAMERA_MSG_RAW_IMAGE_NOTIFY |
+#if defined(OMAP_ICS_CAMERA) || defined(OMAP_ENHANCEMENT_BURST_CAPTURE)
+ CAMERA_MSG_RAW_BURST |
+#endif
CAMERA_MSG_COMPRESSED_IMAGE);
+#if defined(OMAP_ICS_CAMERA) || defined(OMAP_ENHANCEMENT_BURST_CAPTURE)
+ picMsgType |= CAMERA_MSG_COMPRESSED_BURST_IMAGE;
+#endif
+
+#ifdef QCOM_HARDWARE
+ disableMsgType(CAMERA_MSG_PREVIEW_METADATA);
+#endif
enableMsgType(picMsgType);
return mHardware->takePicture();
@@ -687,6 +733,12 @@ void CameraClient::notifyCallback(int32_t msgType, int32_t ext1,
int32_t ext2, void* user) {
LOG2("notifyCallback(%d)", msgType);
+ // Ignore CAF_RESTART callbacks from Samsung's camera driver
+ if (msgType == CAMERA_MSG_FOCUS && ext1 == 4) {
+ LOG2("Ignore CAF_RESTART callback");
+ return;
+ }
+
Mutex* lock = getClientLockFromCookie(user);
if (lock == NULL) return;
Mutex::Autolock alock(*lock);
@@ -740,6 +792,11 @@ void CameraClient::dataCallback(int32_t msgType,
case CAMERA_MSG_COMPRESSED_IMAGE:
client->handleCompressedPicture(dataPtr);
break;
+#if defined(OMAP_ICS_CAMERA) || defined(OMAP_ENHANCEMENT_BURST_CAPTURE)
+ case CAMERA_MSG_COMPRESSED_BURST_IMAGE:
+ client->handleCompressedBurstPicture(dataPtr);
+ break;
+#endif
default:
client->handleGenericData(msgType, dataPtr, metadata);
break;
@@ -869,6 +926,20 @@ void CameraClient::handleCompressedPicture(const sp<IMemory>& mem) {
}
}
+#if defined(OMAP_ICS_CAMERA) || defined(OMAP_ENHANCEMENT_BURST_CAPTURE)
+// burst picture callback - compressed picture ready
+void CameraClient::handleCompressedBurstPicture(const sp<IMemory>& mem) {
+ // Don't disable this message type yet. In this mode takePicture() will
+ // get called only once. When burst finishes this message will get automatically
+ // disabled in the respective call for restarting the preview.
+
+ sp<ICameraClient> c = mCameraClient;
+ mLock.unlock();
+ if (c != 0) {
+ c->dataCallback(CAMERA_MSG_COMPRESSED_IMAGE, mem, NULL);
+ }
+}
+#endif
void CameraClient::handleGenericNotify(int32_t msgType,
int32_t ext1, int32_t ext2) {
diff --git a/services/camera/libcameraservice/CameraClient.h b/services/camera/libcameraservice/CameraClient.h
index 2f31c4e..9b10706 100644
--- a/services/camera/libcameraservice/CameraClient.h
+++ b/services/camera/libcameraservice/CameraClient.h
@@ -97,6 +97,9 @@ private:
void handlePostview(const sp<IMemory>& mem);
void handleRawPicture(const sp<IMemory>& mem);
void handleCompressedPicture(const sp<IMemory>& mem);
+#if defined(OMAP_ICS_CAMERA) || defined(OMAP_ENHANCEMENT_BURST_CAPTURE)
+ void handleCompressedBurstPicture(const sp<IMemory>& mem);
+#endif
void handleGenericNotify(int32_t msgType, int32_t ext1, int32_t ext2);
void handleGenericData(int32_t msgType, const sp<IMemory>& dataPtr,
camera_frame_metadata_t *metadata);
diff --git a/services/camera/libcameraservice/CameraHardwareInterface.h b/services/camera/libcameraservice/CameraHardwareInterface.h
index 05ac9fa..d67996e 100644
--- a/services/camera/libcameraservice/CameraHardwareInterface.h
+++ b/services/camera/libcameraservice/CameraHardwareInterface.h
@@ -113,6 +113,12 @@ public:
ALOGV("%s(%s) buf %p", __FUNCTION__, mName.string(), buf.get());
if (mDevice->ops->set_preview_window) {
+#ifdef QCOM_HARDWARE
+ ALOGV("%s buf %p mPreviewWindow %p", __FUNCTION__, buf.get(), mPreviewWindow.get());
+ if (mPreviewWindow.get() && (buf.get() != mPreviewWindow.get())) {
+ mDevice->ops->set_preview_window(mDevice, 0);
+ }
+#endif
mPreviewWindow = buf;
mHalPreviewWindow.user = this;
ALOGV("%s &mHalPreviewWindow %p mHalPreviewWindow.user %p", __FUNCTION__,
@@ -456,13 +462,17 @@ private:
ALOGV("%s", __FUNCTION__);
CameraHardwareInterface *__this =
static_cast<CameraHardwareInterface *>(user);
- sp<CameraHeapMemory> mem(static_cast<CameraHeapMemory *>(data->handle));
- if (index >= mem->mNumBufs) {
+ if (data != NULL) {
+ sp<CameraHeapMemory> mem(static_cast<CameraHeapMemory *>(data->handle));
+ if (index >= mem->mNumBufs) {
ALOGE("%s: invalid buffer index %d, max allowed is %d", __FUNCTION__,
index, mem->mNumBufs);
return;
+ }
+ __this->mDataCb(msg_type, mem->mBuffers[index], metadata, __this->mCbUser);
+ } else {
+ __this->mDataCb(msg_type, NULL, metadata, __this->mCbUser);
}
- __this->mDataCb(msg_type, mem->mBuffers[index], metadata, __this->mCbUser);
}
static void __data_cb_timestamp(nsecs_t timestamp, int32_t msg_type,
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 124d24d..d8365eb 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -37,6 +37,7 @@
#include <utils/Log.h>
#include <utils/String16.h>
+#include <system/camera.h>
#include "CameraService.h"
#include "CameraClient.h"
#include "Camera2Client.h"
@@ -358,8 +359,18 @@ void CameraService::loadSound() {
LOG1("CameraService::loadSound ref=%d", mSoundRef);
if (mSoundRef++) return;
- mSoundPlayer[SOUND_SHUTTER] = newMediaPlayer("/system/media/audio/ui/camera_click.ogg");
- mSoundPlayer[SOUND_RECORDING] = newMediaPlayer("/system/media/audio/ui/VideoRecord.ogg");
+ char value[PROPERTY_VALUE_MAX];
+ property_get("persist.camera.shutter.disable", value, "0");
+ int disableSound = atoi(value);
+
+ if(!disableSound) {
+ mSoundPlayer[SOUND_SHUTTER] = newMediaPlayer("/system/media/audio/ui/camera_click.ogg");
+ mSoundPlayer[SOUND_RECORDING] = newMediaPlayer("/system/media/audio/ui/VideoRecord.ogg");
+ }
+ else {
+ mSoundPlayer[SOUND_SHUTTER] = NULL;
+ mSoundPlayer[SOUND_RECORDING] = NULL;
+ }
}
void CameraService::releaseSound() {
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 4dab340..7844047 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -23,7 +23,11 @@
#include <hardware/camera.h>
/* This needs to be increased if we can have more cameras */
+#ifdef OMAP_ENHANCEMENT
+#define MAX_CAMERAS 3
+#else
#define MAX_CAMERAS 2
+#endif
namespace android {