Diffstat (limited to 'media/libmedia')
-rw-r--r--  media/libmedia/Android.mk                  1
-rw-r--r--  media/libmedia/AudioRecord.cpp           167
-rw-r--r--  media/libmedia/AudioTrack.cpp             99
-rw-r--r--  media/libmedia/IAudioFlinger.cpp          14
-rw-r--r--  media/libmedia/IAudioPolicyService.cpp    14
-rw-r--r--  media/libmedia/IMediaHTTPConnection.cpp   26
-rw-r--r--  media/libmedia/JetPlayer.cpp               2
-rw-r--r--  media/libmedia/SoundPool.cpp               2
8 files changed, 191 insertions(+), 134 deletions(-)
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index e0acae6..f3770e4 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -72,7 +72,6 @@ LOCAL_WHOLE_STATIC_LIBRARY := libmedia_helper
LOCAL_MODULE:= libmedia
LOCAL_C_INCLUDES := \
- $(call include-path-for, graphics corecg) \
$(TOP)/frameworks/native/include/media/openmax \
external/icu4c/common \
external/icu4c/i18n \
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 700718d..961b0a2 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -41,30 +41,22 @@ status_t AudioRecord::getMinFrameCount(
return BAD_VALUE;
}
- // default to 0 in case of error
- *frameCount = 0;
-
- size_t size = 0;
+ size_t size;
status_t status = AudioSystem::getInputBufferSize(sampleRate, format, channelMask, &size);
if (status != NO_ERROR) {
- ALOGE("AudioSystem could not query the input buffer size; status %d", status);
- return NO_INIT;
+ ALOGE("AudioSystem could not query the input buffer size for sampleRate %u, format %#x, "
+ "channelMask %#x; status %d", sampleRate, format, channelMask, status);
+ return status;
}
- if (size == 0) {
+ // We double the size of input buffer for ping pong use of record buffer.
+ // Assumes audio_is_linear_pcm(format)
+ if ((*frameCount = (size * 2) / (popcount(channelMask) * audio_bytes_per_sample(format))) == 0) {
ALOGE("Unsupported configuration: sampleRate %u, format %#x, channelMask %#x",
sampleRate, format, channelMask);
return BAD_VALUE;
}
- // We double the size of input buffer for ping pong use of record buffer.
- size <<= 1;
-
- // Assumes audio_is_linear_pcm(format)
- uint32_t channelCount = popcount(channelMask);
- size /= channelCount * audio_bytes_per_sample(format);
-
- *frameCount = size;
return NO_ERROR;
}
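Note: the refactored getMinFrameCount() folds the ping-pong doubling and the byte-to-frame conversion into a single expression. A worked sketch with assumed example values (stereo 16-bit PCM and a hypothetical 1920-byte HAL input buffer), not part of the patch:

    // Illustration only; the real size comes from AudioSystem::getInputBufferSize().
    size_t size = 1920;            // bytes reported by the HAL for one input buffer
    uint32_t channelCount = 2;     // popcount(channelMask) for stereo input
    size_t bytesPerSample = 2;     // audio_bytes_per_sample(AUDIO_FORMAT_PCM_16_BIT)
    size_t frameCount = (size * 2) / (channelCount * bytesPerSample);
    // (1920 * 2) / (2 * 2) = 960 frames: the doubled buffer expressed in frames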
@@ -81,10 +73,10 @@ AudioRecord::AudioRecord(
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
- int frameCount,
+ size_t frameCount,
callback_t cbf,
void* user,
- int notificationFrames,
+ uint32_t notificationFrames,
int sessionId,
transfer_type transferType,
audio_input_flags_t flags __unused)
@@ -110,10 +102,8 @@ AudioRecord::~AudioRecord()
mAudioRecordThread->requestExitAndWait();
mAudioRecordThread.clear();
}
- if (mAudioRecord != 0) {
- mAudioRecord->asBinder()->unlinkToDeath(mDeathNotifier, this);
- mAudioRecord.clear();
- }
+ mAudioRecord->asBinder()->unlinkToDeath(mDeathNotifier, this);
+ mAudioRecord.clear();
IPCThreadState::self()->flushCommands();
AudioSystem::releaseAudioSessionId(mSessionId, -1);
}
@@ -124,15 +114,20 @@ status_t AudioRecord::set(
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
- int frameCountInt,
+ size_t frameCount,
callback_t cbf,
void* user,
- int notificationFrames,
+ uint32_t notificationFrames,
bool threadCanCallJava,
int sessionId,
transfer_type transferType,
audio_input_flags_t flags)
{
+ ALOGV("set(): inputSource %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
+ "notificationFrames %u, sessionId %d, transferType %d, flags %#x",
+ inputSource, sampleRate, format, channelMask, frameCount, notificationFrames,
+ sessionId, transferType, flags);
+
switch (transferType) {
case TRANSFER_DEFAULT:
if (cbf == NULL || threadCanCallJava) {
@@ -156,23 +151,15 @@ status_t AudioRecord::set(
}
mTransfer = transferType;
- // FIXME "int" here is legacy and will be replaced by size_t later
- if (frameCountInt < 0) {
- ALOGE("Invalid frame count %d", frameCountInt);
- return BAD_VALUE;
- }
- size_t frameCount = frameCountInt;
-
- ALOGV("set(): sampleRate %u, channelMask %#x, frameCount %u", sampleRate, channelMask,
- frameCount);
-
AutoMutex lock(mLock);
+ // invariant that mAudioRecord != 0 is true only after set() returns successfully
if (mAudioRecord != 0) {
ALOGE("Track already in use");
return INVALID_OPERATION;
}
+ // handle default values first.
if (inputSource == AUDIO_SOURCE_DEFAULT) {
inputSource = AUDIO_SOURCE_MIC;
}
@@ -209,15 +196,19 @@ status_t AudioRecord::set(
uint32_t channelCount = popcount(channelMask);
mChannelCount = channelCount;
- // Assumes audio_is_linear_pcm(format), else sizeof(uint8_t)
- mFrameSize = channelCount * audio_bytes_per_sample(format);
+ if (audio_is_linear_pcm(format)) {
+ mFrameSize = channelCount * audio_bytes_per_sample(format);
+ } else {
+ mFrameSize = sizeof(uint8_t);
+ }
// validate framecount
- size_t minFrameCount = 0;
+ size_t minFrameCount;
status_t status = AudioRecord::getMinFrameCount(&minFrameCount,
sampleRate, format, channelMask);
if (status != NO_ERROR) {
- ALOGE("getMinFrameCount() failed; status %d", status);
+ ALOGE("getMinFrameCount() failed for sampleRate %u, format %#x, channelMask %#x; status %d",
+ sampleRate, format, channelMask, status);
return status;
}
ALOGV("AudioRecord::set() minFrameCount = %d", minFrameCount);
@@ -242,23 +233,27 @@ status_t AudioRecord::set(
ALOGV("set(): mSessionId %d", mSessionId);
mFlags = flags;
+ mCbf = cbf;
+
+ if (cbf != NULL) {
+ mAudioRecordThread = new AudioRecordThread(*this, threadCanCallJava);
+ mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
+ }
// create the IAudioRecord
status = openRecord_l(0 /*epoch*/);
+
if (status != NO_ERROR) {
+ if (mAudioRecordThread != 0) {
+ mAudioRecordThread->requestExit(); // see comment in AudioRecord.h
+ mAudioRecordThread->requestExitAndWait();
+ mAudioRecordThread.clear();
+ }
return status;
}
- if (cbf != NULL) {
- mAudioRecordThread = new AudioRecordThread(*this, threadCanCallJava);
- mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
- }
-
mStatus = NO_ERROR;
-
mActive = false;
- mCbf = cbf;
- mRefreshRemaining = true;
mUserData = user;
// TODO: add audio hardware input latency here
mLatency = (1000*mFrameCount) / sampleRate;
@@ -433,22 +428,37 @@ status_t AudioRecord::openRecord_l(size_t epoch)
return NO_INIT;
}
- IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
- pid_t tid = -1;
+ // Fast tracks must be at the primary _output_ [sic] sampling rate,
+ // because there is currently no concept of a primary input sampling rate
+ uint32_t afSampleRate = AudioSystem::getPrimaryOutputSamplingRate();
+ if (afSampleRate == 0) {
+ ALOGW("getPrimaryOutputSamplingRate failed");
+ }
// Client can only express a preference for FAST. Server will perform additional tests.
- // The only supported use case for FAST is callback transfer mode.
+ if ((mFlags & AUDIO_INPUT_FLAG_FAST) && !(
+ // use case: callback transfer mode
+ (mTransfer == TRANSFER_CALLBACK) &&
+ // matching sample rate
+ (mSampleRate == afSampleRate))) {
+ ALOGW("AUDIO_INPUT_FLAG_FAST denied by client");
+ // once denied, do not request again if IAudioRecord is re-created
+ mFlags = (audio_input_flags_t) (mFlags & ~AUDIO_INPUT_FLAG_FAST);
+ }
+
+ IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
+
+ pid_t tid = -1;
if (mFlags & AUDIO_INPUT_FLAG_FAST) {
- if ((mTransfer != TRANSFER_CALLBACK) || (mAudioRecordThread == 0)) {
- ALOGW("AUDIO_INPUT_FLAG_FAST denied by client");
- // once denied, do not request again if IAudioRecord is re-created
- mFlags = (audio_input_flags_t) (mFlags & ~AUDIO_INPUT_FLAG_FAST);
- } else {
- trackFlags |= IAudioFlinger::TRACK_FAST;
+ trackFlags |= IAudioFlinger::TRACK_FAST;
+ if (mAudioRecordThread != 0) {
tid = mAudioRecordThread->getTid();
}
}
+ // FIXME Assume double buffering, because we don't know the true HAL sample rate
+ const uint32_t nBuffering = 2;
+
mNotificationFramesAct = mNotificationFramesReq;
size_t frameCount = mReqFrameCount;
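Note: the client-side FAST check now runs before the record track is requested, and the flag is kept only for callback transfer at the primary output sampling rate (there is no primary input rate to compare against). A compact restatement of that gate, for illustration only:

    // Names (mTransfer, mSampleRate, mFlags, afSampleRate) are those used in the hunk above.
    bool fastEligible = (mTransfer == TRANSFER_CALLBACK) && (mSampleRate == afSampleRate);
    if ((mFlags & AUDIO_INPUT_FLAG_FAST) && !fastEligible) {
        // once denied, the flag is cleared so a re-created IAudioRecord will not ask again
        mFlags = (audio_input_flags_t) (mFlags & ~AUDIO_INPUT_FLAG_FAST);
    }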
@@ -485,10 +495,12 @@ status_t AudioRecord::openRecord_l(size_t epoch)
ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
"session ID changed from %d to %d", originalSessionId, mSessionId);
- if (record == 0 || status != NO_ERROR) {
+ if (status != NO_ERROR) {
ALOGE("AudioFlinger could not create record track, status: %d", status);
goto release;
}
+ ALOG_ASSERT(record != 0);
+
// AudioFlinger now owns the reference to the I/O handle,
// so we are no longer responsible for releasing it.
@@ -502,52 +514,55 @@ status_t AudioRecord::openRecord_l(size_t epoch)
ALOGE("Could not get control block pointer");
return NO_INIT;
}
+ // invariant that mAudioRecord != 0 is true only after set() returns successfully
if (mAudioRecord != 0) {
mAudioRecord->asBinder()->unlinkToDeath(mDeathNotifier, this);
mDeathNotifier.clear();
}
-
- // We retain a copy of the I/O handle, but don't own the reference
- mInput = input;
mAudioRecord = record;
+
mCblkMemory = iMem;
audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
mCblk = cblk;
- // note that temp is the (possibly revised) value of mFrameCount
+ // note that temp is the (possibly revised) value of frameCount
if (temp < frameCount || (frameCount == 0 && temp == 0)) {
ALOGW("Requested frameCount %u but received frameCount %u", frameCount, temp);
}
frameCount = temp;
- // If IAudioRecord is re-created, don't let the requested frameCount
- // decrease. This can confuse clients that cache frameCount().
- if (frameCount > mReqFrameCount) {
- mReqFrameCount = frameCount;
- }
- // FIXME missing fast track frameCount logic
mAwaitBoost = false;
if (mFlags & AUDIO_INPUT_FLAG_FAST) {
if (trackFlags & IAudioFlinger::TRACK_FAST) {
- ALOGV("AUDIO_INPUT_FLAG_FAST successful; frameCount %u", mFrameCount);
+ ALOGV("AUDIO_INPUT_FLAG_FAST successful; frameCount %u", frameCount);
mAwaitBoost = true;
- // double-buffering is not required for fast tracks, due to tighter scheduling
- if (mNotificationFramesAct == 0 || mNotificationFramesAct > mFrameCount) {
- mNotificationFramesAct = mFrameCount;
- }
} else {
- ALOGV("AUDIO_INPUT_FLAG_FAST denied by server; frameCount %u", mFrameCount);
+ ALOGV("AUDIO_INPUT_FLAG_FAST denied by server; frameCount %u", frameCount);
// once denied, do not request again if IAudioRecord is re-created
mFlags = (audio_input_flags_t) (mFlags & ~AUDIO_INPUT_FLAG_FAST);
- if (mNotificationFramesAct == 0 || mNotificationFramesAct > mFrameCount/2) {
- mNotificationFramesAct = mFrameCount/2;
- }
+ }
+ // Theoretically double-buffering is not required for fast tracks,
+ // due to tighter scheduling. But in practice, to accommodate kernels with
+ // scheduling jitter, and apps with computation jitter, we use double-buffering.
+ if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
+ mNotificationFramesAct = frameCount/nBuffering;
}
}
- // starting address of buffers in shared memory
+ // We retain a copy of the I/O handle, but don't own the reference
+ mInput = input;
+ mRefreshRemaining = true;
+
+ // Starting address of buffers in shared memory, immediately after the control block. This
+ // address is for the mapping within client address space. AudioFlinger::TrackBase::mBuffer
+ // is for the server address space.
void *buffers = (char*)cblk + sizeof(audio_track_cblk_t);
mFrameCount = frameCount;
+ // If IAudioRecord is re-created, don't let the requested frameCount
+ // decrease. This can confuse clients that cache frameCount().
+ if (frameCount > mReqFrameCount) {
+ mReqFrameCount = frameCount;
+ }
// update proxy
mProxy = new AudioRecordClientProxy(cblk, buffers, mFrameCount, mFrameSize);
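Note: with nBuffering = 2 from the earlier hunk, the notification period for fast tracks is clamped to half the final frame count. A worked example with a hypothetical frameCount of 960 (20 ms at 48 kHz), illustration only:

    const uint32_t nBuffering = 2;
    size_t frameCount = 960;              // assumed value granted by AudioFlinger
    uint32_t notificationFramesAct = 0;   // 0 means the client did not request a period
    if (notificationFramesAct == 0 || notificationFramesAct > frameCount / nBuffering) {
        notificationFramesAct = frameCount / nBuffering;   // 480 frames, i.e. 10 ms at 48 kHz
    }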
@@ -799,7 +814,7 @@ nsecs_t AudioRecord::processAudioBuffer()
}
// Cache other fields that will be needed soon
- size_t notificationFrames = mNotificationFramesAct;
+ uint32_t notificationFrames = mNotificationFramesAct;
if (mRefreshRemaining) {
mRefreshRemaining = false;
mRemainingFrames = notificationFrames;
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 3184902..60ed626 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -99,7 +99,8 @@ AudioTrack::AudioTrack()
: mStatus(NO_INIT),
mIsTimed(false),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
- mPreviousSchedulingGroup(SP_DEFAULT)
+ mPreviousSchedulingGroup(SP_DEFAULT),
+ mPausedPosition(0)
{
}
@@ -108,11 +109,11 @@ AudioTrack::AudioTrack(
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
- int frameCount,
+ size_t frameCount,
audio_output_flags_t flags,
callback_t cbf,
void* user,
- int notificationFrames,
+ uint32_t notificationFrames,
int sessionId,
transfer_type transferType,
const audio_offload_info_t *offloadInfo,
@@ -121,7 +122,8 @@ AudioTrack::AudioTrack(
: mStatus(NO_INIT),
mIsTimed(false),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
- mPreviousSchedulingGroup(SP_DEFAULT)
+ mPreviousSchedulingGroup(SP_DEFAULT),
+ mPausedPosition(0)
{
mStatus = set(streamType, sampleRate, format, channelMask,
frameCount, flags, cbf, user, notificationFrames,
@@ -138,7 +140,7 @@ AudioTrack::AudioTrack(
audio_output_flags_t flags,
callback_t cbf,
void* user,
- int notificationFrames,
+ uint32_t notificationFrames,
int sessionId,
transfer_type transferType,
const audio_offload_info_t *offloadInfo,
@@ -147,7 +149,8 @@ AudioTrack::AudioTrack(
: mStatus(NO_INIT),
mIsTimed(false),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
- mPreviousSchedulingGroup(SP_DEFAULT)
+ mPreviousSchedulingGroup(SP_DEFAULT),
+ mPausedPosition(0)
{
mStatus = set(streamType, sampleRate, format, channelMask,
0 /*frameCount*/, flags, cbf, user, notificationFrames,
@@ -182,11 +185,11 @@ status_t AudioTrack::set(
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
- int frameCountInt,
+ size_t frameCount,
audio_output_flags_t flags,
callback_t cbf,
void* user,
- int notificationFrames,
+ uint32_t notificationFrames,
const sp<IMemory>& sharedBuffer,
bool threadCanCallJava,
int sessionId,
@@ -195,6 +198,11 @@ status_t AudioTrack::set(
int uid,
pid_t pid)
{
+ ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
+ "flags #%x, notificationFrames %u, sessionId %d, transferType %d",
+ streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
+ sessionId, transferType);
+
switch (transferType) {
case TRANSFER_DEFAULT:
if (sharedBuffer != 0) {
@@ -231,13 +239,6 @@ status_t AudioTrack::set(
mSharedBuffer = sharedBuffer;
mTransfer = transferType;
- // FIXME "int" here is legacy and will be replaced by size_t later
- if (frameCountInt < 0) {
- ALOGE("Invalid frame count %d", frameCountInt);
- return BAD_VALUE;
- }
- size_t frameCount = frameCountInt;
-
ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
sharedBuffer->size());
@@ -288,6 +289,9 @@ status_t AudioTrack::set(
ALOGE("Invalid channel mask %#x", channelMask);
return BAD_VALUE;
}
+ mChannelMask = channelMask;
+ uint32_t channelCount = popcount(channelMask);
+ mChannelCount = channelCount;
// AudioFlinger does not currently support 8-bit data in shared memory
if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
@@ -311,10 +315,6 @@ status_t AudioTrack::set(
flags = (audio_output_flags_t)(flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
}
- mChannelMask = channelMask;
- uint32_t channelCount = popcount(channelMask);
- mChannelCount = channelCount;
-
if (audio_is_linear_pcm(format)) {
mFrameSize = channelCount * audio_bytes_per_sample(format);
mFrameSizeAF = channelCount * sizeof(int16_t);
@@ -554,6 +554,16 @@ void AudioTrack::pause()
}
mProxy->interrupt();
mAudioTrack->pause();
+
+ if (isOffloaded_l()) {
+ if (mOutput != 0) {
+ uint32_t halFrames;
+ // OffloadThread sends HAL pause in its threadLoop; the time saved
+ // here can be slightly off
+ AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
+ ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
+ }
+ }
}
status_t AudioTrack::setVolume(float left, float right)
@@ -773,6 +783,12 @@ status_t AudioTrack::getPosition(uint32_t *position) const
if (isOffloaded_l()) {
uint32_t dspFrames = 0;
+ if ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING)) {
+ ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
+ *position = mPausedPosition;
+ return NO_ERROR;
+ }
+
if (mOutput != 0) {
uint32_t halFrames;
AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
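Note: taken together with the pause() change above, an offloaded track now reports a stable position while paused: pause() caches the render position in mPausedPosition and getPosition() returns that cached value instead of querying the DSP. A hypothetical caller sketch, not part of the patch:

    void pauseAndReport(const sp<AudioTrack>& track) {   // assumes an offloaded, playing track
        track->pause();                    // caches the current render position for offload
        uint32_t pos = 0;
        if (track->getPosition(&pos) == NO_ERROR) {
            // while paused, pos holds the value captured in pause() and does not drift
            ALOGV("paused at frame %u", pos);
        }
    }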
@@ -888,8 +904,8 @@ status_t AudioTrack::createTrack_l(size_t epoch)
// either of these use cases:
// use case 1: shared buffer
(mSharedBuffer != 0) ||
- // use case 2: callback handler
- (mCbf != NULL)) &&
+ // use case 2: callback transfer mode
+ (mTransfer == TRANSFER_CALLBACK)) &&
// matching sample rate
(mSampleRate == afSampleRate))) {
ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
@@ -1012,10 +1028,12 @@ status_t AudioTrack::createTrack_l(size_t epoch)
mClientUid,
&status);
- if (track == 0) {
+ if (status != NO_ERROR) {
ALOGE("AudioFlinger could not create track, status: %d", status);
goto release;
}
+ ALOG_ASSERT(track != 0);
+
// AudioFlinger now owns the reference to the I/O handle,
// so we are no longer responsible for releasing it.
@@ -1035,6 +1053,7 @@ status_t AudioTrack::createTrack_l(size_t epoch)
mDeathNotifier.clear();
}
mAudioTrack = track;
+
mCblkMemory = iMem;
audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
mCblk = cblk;
@@ -1046,6 +1065,7 @@ status_t AudioTrack::createTrack_l(size_t epoch)
ALOGW("Requested frameCount %u but received frameCount %u", frameCount, temp);
}
frameCount = temp;
+
mAwaitBoost = false;
if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
if (trackFlags & IAudioFlinger::TRACK_FAST) {
@@ -1099,6 +1119,7 @@ status_t AudioTrack::createTrack_l(size_t epoch)
mAudioTrack->attachAuxEffect(mAuxEffectId);
// FIXME don't believe this lie
mLatency = afLatency + (1000*frameCount) / mSampleRate;
+
mFrameCount = frameCount;
// If IAudioTrack is re-created, don't let the requested frameCount
// decrease. This can confuse clients that cache frameCount().
@@ -1479,7 +1500,7 @@ nsecs_t AudioTrack::processAudioBuffer()
// Cache other fields that will be needed soon
uint32_t loopPeriod = mLoopPeriod;
uint32_t sampleRate = mSampleRate;
- size_t notificationFrames = mNotificationFramesAct;
+ uint32_t notificationFrames = mNotificationFramesAct;
if (mRefreshRemaining) {
mRefreshRemaining = false;
mRemainingFrames = notificationFrames;
@@ -1487,6 +1508,7 @@ nsecs_t AudioTrack::processAudioBuffer()
}
size_t misalignment = mProxy->getMisalignment();
uint32_t sequence = mSequence;
+ sp<AudioTrackClientProxy> proxy = mProxy;
// These fields don't need to be cached, because they are assigned only by set():
// mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
@@ -1495,35 +1517,32 @@ nsecs_t AudioTrack::processAudioBuffer()
mLock.unlock();
if (waitStreamEnd) {
- AutoMutex lock(mLock);
-
- sp<AudioTrackClientProxy> proxy = mProxy;
- sp<IMemory> iMem = mCblkMemory;
-
struct timespec timeout;
timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
timeout.tv_nsec = 0;
- mLock.unlock();
- status_t status = mProxy->waitStreamEndDone(&timeout);
- mLock.lock();
+ status_t status = proxy->waitStreamEndDone(&timeout);
switch (status) {
case NO_ERROR:
case DEAD_OBJECT:
case TIMED_OUT:
- mLock.unlock();
mCbf(EVENT_STREAM_END, mUserData, NULL);
- mLock.lock();
- if (mState == STATE_STOPPING) {
- mState = STATE_STOPPED;
- if (status != DEAD_OBJECT) {
- return NS_INACTIVE;
+ {
+ AutoMutex lock(mLock);
+ // The previously assigned value of waitStreamEnd is no longer valid,
+ // since the mutex has been unlocked and either the callback handler
+ // or another thread could have re-started the AudioTrack during that time.
+ waitStreamEnd = mState == STATE_STOPPING;
+ if (waitStreamEnd) {
+ mState = STATE_STOPPED;
}
}
- return 0;
- default:
- return 0;
+ if (waitStreamEnd && status != DEAD_OBJECT) {
+ return NS_INACTIVE;
+ }
+ break;
}
+ return 0;
}
// perform callbacks while unlocked
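Note: the restructured wait-for-stream-end path drops the nested lock/unlock pairs and, after the EVENT_STREAM_END callback, re-evaluates the state under the lock, since the callback or another thread may have restarted the track in the meantime. The pattern reduced to its core, illustration only:

    // Re-validate a cached condition after the mutex has been released and re-acquired;
    // the value read before the callback can no longer be trusted.
    bool stillStopping;
    {
        AutoMutex lock(mLock);
        stillStopping = (mState == STATE_STOPPING);
        if (stillStopping) {
            mState = STATE_STOPPED;
        }
    }
    // Only report the track as inactive if it was still stopping and the binder is alive.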
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index e696323..a9a9f1a 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -58,7 +58,7 @@ enum {
RESTORE_OUTPUT,
OPEN_INPUT,
CLOSE_INPUT,
- SET_STREAM_OUTPUT,
+ INVALIDATE_STREAM,
SET_VOICE_VOLUME,
GET_RENDER_POSITION,
GET_INPUT_FRAMES_LOST,
@@ -545,13 +545,12 @@ public:
return reply.readInt32();
}
- virtual status_t setStreamOutput(audio_stream_type_t stream, audio_io_handle_t output)
+ virtual status_t invalidateStream(audio_stream_type_t stream)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
data.writeInt32((int32_t) stream);
- data.writeInt32((int32_t) output);
- remote()->transact(SET_STREAM_OUTPUT, data, &reply);
+ remote()->transact(INVALIDATE_STREAM, data, &reply);
return reply.readInt32();
}
@@ -1044,11 +1043,10 @@ status_t BnAudioFlinger::onTransact(
reply->writeInt32(closeInput((audio_io_handle_t) data.readInt32()));
return NO_ERROR;
} break;
- case SET_STREAM_OUTPUT: {
+ case INVALIDATE_STREAM: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- uint32_t stream = data.readInt32();
- audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
- reply->writeInt32(setStreamOutput((audio_stream_type_t) stream, output));
+ audio_stream_type_t stream = (audio_stream_type_t) data.readInt32();
+ reply->writeInt32(invalidateStream(stream));
return NO_ERROR;
} break;
case SET_VOICE_VOLUME: {
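Note: setStreamOutput(stream, output) is replaced by invalidateStream(stream); the caller no longer names a destination output handle, it simply asks AudioFlinger to invalidate every track of the given stream type. A hypothetical caller sketch (the helper name is illustrative, not from this patch):

    status_t invalidateMusic(const sp<IAudioFlinger>& af) {
        // Only the stream type crosses the binder now; the old output handle argument is gone.
        return af->invalidateStream(AUDIO_STREAM_MUSIC);
    }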
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index 4be3c09..1a027a6 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -476,10 +476,11 @@ status_t BnAudioPolicyService::onTransact(
case START_OUTPUT: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
audio_io_handle_t output = static_cast <audio_io_handle_t>(data.readInt32());
- uint32_t stream = data.readInt32();
+ audio_stream_type_t stream =
+ static_cast <audio_stream_type_t>(data.readInt32());
int session = data.readInt32();
reply->writeInt32(static_cast <uint32_t>(startOutput(output,
- (audio_stream_type_t)stream,
+ stream,
session)));
return NO_ERROR;
} break;
@@ -487,10 +488,11 @@ status_t BnAudioPolicyService::onTransact(
case STOP_OUTPUT: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
audio_io_handle_t output = static_cast <audio_io_handle_t>(data.readInt32());
- uint32_t stream = data.readInt32();
+ audio_stream_type_t stream =
+ static_cast <audio_stream_type_t>(data.readInt32());
int session = data.readInt32();
reply->writeInt32(static_cast <uint32_t>(stopOutput(output,
- (audio_stream_type_t)stream,
+ stream,
session)));
return NO_ERROR;
} break;
@@ -633,7 +635,7 @@ status_t BnAudioPolicyService::onTransact(
CHECK_INTERFACE(IAudioPolicyService, data, reply);
audio_stream_type_t stream = (audio_stream_type_t) data.readInt32();
uint32_t inPastMs = (uint32_t)data.readInt32();
- reply->writeInt32( isStreamActive((audio_stream_type_t) stream, inPastMs) );
+ reply->writeInt32( isStreamActive(stream, inPastMs) );
return NO_ERROR;
} break;
@@ -641,7 +643,7 @@ status_t BnAudioPolicyService::onTransact(
CHECK_INTERFACE(IAudioPolicyService, data, reply);
audio_stream_type_t stream = (audio_stream_type_t) data.readInt32();
uint32_t inPastMs = (uint32_t)data.readInt32();
- reply->writeInt32( isStreamActiveRemotely((audio_stream_type_t) stream, inPastMs) );
+ reply->writeInt32( isStreamActiveRemotely(stream, inPastMs) );
return NO_ERROR;
} break;
diff --git a/media/libmedia/IMediaHTTPConnection.cpp b/media/libmedia/IMediaHTTPConnection.cpp
index 622d9cf..7e26ee6 100644
--- a/media/libmedia/IMediaHTTPConnection.cpp
+++ b/media/libmedia/IMediaHTTPConnection.cpp
@@ -33,6 +33,7 @@ enum {
READ_AT,
GET_SIZE,
GET_MIME_TYPE,
+ GET_URI
};
struct BpMediaHTTPConnection : public BpInterface<IMediaHTTPConnection> {
@@ -94,7 +95,10 @@ struct BpMediaHTTPConnection : public BpInterface<IMediaHTTPConnection> {
data.writeInt32(size);
status_t err = remote()->transact(READ_AT, data, &reply);
- CHECK_EQ(err, (status_t)OK);
+ if (err != OK) {
+ ALOGE("remote readAt failed");
+ return UNKNOWN_ERROR;
+ }
int32_t exceptionCode = reply.readExceptionCode();
@@ -147,6 +151,26 @@ struct BpMediaHTTPConnection : public BpInterface<IMediaHTTPConnection> {
return OK;
}
+ virtual status_t getUri(String8 *uri) {
+ *uri = String8("");
+
+ Parcel data, reply;
+ data.writeInterfaceToken(
+ IMediaHTTPConnection::getInterfaceDescriptor());
+
+ remote()->transact(GET_URI, data, &reply);
+
+ int32_t exceptionCode = reply.readExceptionCode();
+
+ if (exceptionCode) {
+ return UNKNOWN_ERROR;
+ }
+
+ *uri = String8(reply.readString16());
+
+ return OK;
+ }
+
private:
sp<IMemory> mMemory;
};
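Note: the new getUri() proxy mirrors the other transactions: it sends GET_URI, maps a Java exception in the reply to UNKNOWN_ERROR, and otherwise returns the string from the reply. A hypothetical usage sketch (conn is assumed to be a connected IMediaHTTPConnection), not part of the patch:

    String8 uri;
    if (conn->getUri(&uri) == OK) {
        ALOGV("effective URI: %s", uri.string());
    }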
diff --git a/media/libmedia/JetPlayer.cpp b/media/libmedia/JetPlayer.cpp
index e914b34..f0f1832 100644
--- a/media/libmedia/JetPlayer.cpp
+++ b/media/libmedia/JetPlayer.cpp
@@ -90,7 +90,7 @@ int JetPlayer::init()
pLibConfig->sampleRate,
AUDIO_FORMAT_PCM_16_BIT,
audio_channel_out_mask_from_count(pLibConfig->numChannels),
- mTrackBufferSize,
+ (size_t) mTrackBufferSize,
AUDIO_OUTPUT_FLAG_NONE);
// create render and playback thread
diff --git a/media/libmedia/SoundPool.cpp b/media/libmedia/SoundPool.cpp
index 4885b4f..a55e09c 100644
--- a/media/libmedia/SoundPool.cpp
+++ b/media/libmedia/SoundPool.cpp
@@ -587,7 +587,7 @@ void SoundChannel::play(const sp<Sample>& sample, int nextChannelID, float leftV
uint32_t sampleRate = uint32_t(float(sample->sampleRate()) * rate + 0.5);
uint32_t totalFrames = (kDefaultBufferCount * afFrameCount * sampleRate) / afSampleRate;
uint32_t bufferFrames = (totalFrames + (kDefaultBufferCount - 1)) / kDefaultBufferCount;
- uint32_t frameCount = 0;
+ size_t frameCount = 0;
if (loop) {
frameCount = sample->size()/numChannels/