diff options
Diffstat (limited to 'media')
197 files changed, 17053 insertions, 2832 deletions
diff --git a/media/common_time/Android.mk b/media/common_time/Android.mk new file mode 100644 index 0000000..526f17b --- /dev/null +++ b/media/common_time/Android.mk @@ -0,0 +1,21 @@ +LOCAL_PATH:= $(call my-dir) +# +# libcommon_time_client +# (binder marshalers for ICommonClock as well as common clock and local clock +# helper code) +# + +include $(CLEAR_VARS) + +LOCAL_MODULE := libcommon_time_client +LOCAL_MODULE_TAGS := optional +LOCAL_SRC_FILES := cc_helper.cpp \ + local_clock.cpp \ + ICommonClock.cpp \ + ICommonTimeConfig.cpp \ + utils.cpp +LOCAL_SHARED_LIBRARIES := libbinder \ + libhardware \ + libutils + +include $(BUILD_SHARED_LIBRARY) diff --git a/media/common_time/ICommonClock.cpp b/media/common_time/ICommonClock.cpp new file mode 100644 index 0000000..28b43ac --- /dev/null +++ b/media/common_time/ICommonClock.cpp @@ -0,0 +1,432 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include <linux/socket.h> + +#include <common_time/ICommonClock.h> +#include <binder/Parcel.h> + +#include "utils.h" + +namespace android { + +/***** ICommonClock *****/ + +enum { + IS_COMMON_TIME_VALID = IBinder::FIRST_CALL_TRANSACTION, + COMMON_TIME_TO_LOCAL_TIME, + LOCAL_TIME_TO_COMMON_TIME, + GET_COMMON_TIME, + GET_COMMON_FREQ, + GET_LOCAL_TIME, + GET_LOCAL_FREQ, + GET_ESTIMATED_ERROR, + GET_TIMELINE_ID, + GET_STATE, + GET_MASTER_ADDRESS, + REGISTER_LISTENER, + UNREGISTER_LISTENER, +}; + +const String16 ICommonClock::kServiceName("common_time.clock"); +const uint64_t ICommonClock::kInvalidTimelineID = 0; +const int32_t ICommonClock::kErrorEstimateUnknown = 0x7FFFFFFF; + +class BpCommonClock : public BpInterface<ICommonClock> +{ + public: + BpCommonClock(const sp<IBinder>& impl) + : BpInterface<ICommonClock>(impl) {} + + virtual status_t isCommonTimeValid(bool* valid, uint32_t* timelineID) { + Parcel data, reply; + data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor()); + status_t status = remote()->transact(IS_COMMON_TIME_VALID, + data, + &reply); + if (status == OK) { + status = reply.readInt32(); + if (status == OK) { + *valid = reply.readInt32(); + *timelineID = reply.readInt32(); + } + } + return status; + } + + virtual status_t commonTimeToLocalTime(int64_t commonTime, + int64_t* localTime) { + Parcel data, reply; + data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor()); + data.writeInt64(commonTime); + status_t status = remote()->transact(COMMON_TIME_TO_LOCAL_TIME, + data, &reply); + if (status == OK) { + status = reply.readInt32(); + if (status == OK) { + *localTime = reply.readInt64(); + } + } + return status; + } + + virtual status_t localTimeToCommonTime(int64_t localTime, + int64_t* commonTime) { + Parcel data, reply; + data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor()); + data.writeInt64(localTime); + status_t status = remote()->transact(LOCAL_TIME_TO_COMMON_TIME, + data, &reply); + if (status == OK) { + 
status = reply.readInt32(); + if (status == OK) { + *commonTime = reply.readInt64(); + } + } + return status; + } + + virtual status_t getCommonTime(int64_t* commonTime) { + Parcel data, reply; + data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor()); + status_t status = remote()->transact(GET_COMMON_TIME, data, &reply); + if (status == OK) { + status = reply.readInt32(); + if (status == OK) { + *commonTime = reply.readInt64(); + } + } + return status; + } + + virtual status_t getCommonFreq(uint64_t* freq) { + Parcel data, reply; + data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor()); + status_t status = remote()->transact(GET_COMMON_FREQ, data, &reply); + if (status == OK) { + status = reply.readInt32(); + if (status == OK) { + *freq = reply.readInt64(); + } + } + return status; + } + + virtual status_t getLocalTime(int64_t* localTime) { + Parcel data, reply; + data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor()); + status_t status = remote()->transact(GET_LOCAL_TIME, data, &reply); + if (status == OK) { + status = reply.readInt32(); + if (status == OK) { + *localTime = reply.readInt64(); + } + } + return status; + } + + virtual status_t getLocalFreq(uint64_t* freq) { + Parcel data, reply; + data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor()); + status_t status = remote()->transact(GET_LOCAL_FREQ, data, &reply); + if (status == OK) { + status = reply.readInt32(); + if (status == OK) { + *freq = reply.readInt64(); + } + } + return status; + } + + virtual status_t getEstimatedError(int32_t* estimate) { + Parcel data, reply; + data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor()); + status_t status = remote()->transact(GET_ESTIMATED_ERROR, data, &reply); + if (status == OK) { + status = reply.readInt32(); + if (status == OK) { + *estimate = reply.readInt32(); + } + } + return status; + } + + virtual status_t getTimelineID(uint64_t* id) { + Parcel data, reply; + 
data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor()); + status_t status = remote()->transact(GET_TIMELINE_ID, data, &reply); + if (status == OK) { + status = reply.readInt32(); + if (status == OK) { + *id = static_cast<uint64_t>(reply.readInt64()); + } + } + return status; + } + + virtual status_t getState(State* state) { + Parcel data, reply; + data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor()); + status_t status = remote()->transact(GET_STATE, data, &reply); + if (status == OK) { + status = reply.readInt32(); + if (status == OK) { + *state = static_cast<State>(reply.readInt32()); + } + } + return status; + } + + virtual status_t getMasterAddr(struct sockaddr_storage* addr) { + Parcel data, reply; + data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor()); + status_t status = remote()->transact(GET_MASTER_ADDRESS, data, &reply); + if (status == OK) { + status = reply.readInt32(); + if (status == OK) + deserializeSockaddr(&reply, addr); + } + return status; + } + + virtual status_t registerListener( + const sp<ICommonClockListener>& listener) { + Parcel data, reply; + data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor()); + data.writeStrongBinder(listener->asBinder()); + + status_t status = remote()->transact(REGISTER_LISTENER, data, &reply); + + if (status == OK) { + status = reply.readInt32(); + } + + return status; + } + + virtual status_t unregisterListener( + const sp<ICommonClockListener>& listener) { + Parcel data, reply; + data.writeInterfaceToken(ICommonClock::getInterfaceDescriptor()); + data.writeStrongBinder(listener->asBinder()); + status_t status = remote()->transact(UNREGISTER_LISTENER, data, &reply); + + if (status == OK) { + status = reply.readInt32(); + } + + return status; + } +}; + +IMPLEMENT_META_INTERFACE(CommonClock, "android.os.ICommonClock"); + +status_t BnCommonClock::onTransact(uint32_t code, + const Parcel& data, + Parcel* reply, + uint32_t flags) { + switch(code) { + case 
IS_COMMON_TIME_VALID: { + CHECK_INTERFACE(ICommonClock, data, reply); + bool valid; + uint32_t timelineID; + status_t status = isCommonTimeValid(&valid, &timelineID); + reply->writeInt32(status); + if (status == OK) { + reply->writeInt32(valid); + reply->writeInt32(timelineID); + } + return OK; + } break; + + case COMMON_TIME_TO_LOCAL_TIME: { + CHECK_INTERFACE(ICommonClock, data, reply); + int64_t commonTime = data.readInt64(); + int64_t localTime; + status_t status = commonTimeToLocalTime(commonTime, &localTime); + reply->writeInt32(status); + if (status == OK) { + reply->writeInt64(localTime); + } + return OK; + } break; + + case LOCAL_TIME_TO_COMMON_TIME: { + CHECK_INTERFACE(ICommonClock, data, reply); + int64_t localTime = data.readInt64(); + int64_t commonTime; + status_t status = localTimeToCommonTime(localTime, &commonTime); + reply->writeInt32(status); + if (status == OK) { + reply->writeInt64(commonTime); + } + return OK; + } break; + + case GET_COMMON_TIME: { + CHECK_INTERFACE(ICommonClock, data, reply); + int64_t commonTime; + status_t status = getCommonTime(&commonTime); + reply->writeInt32(status); + if (status == OK) { + reply->writeInt64(commonTime); + } + return OK; + } break; + + case GET_COMMON_FREQ: { + CHECK_INTERFACE(ICommonClock, data, reply); + uint64_t freq; + status_t status = getCommonFreq(&freq); + reply->writeInt32(status); + if (status == OK) { + reply->writeInt64(freq); + } + return OK; + } break; + + case GET_LOCAL_TIME: { + CHECK_INTERFACE(ICommonClock, data, reply); + int64_t localTime; + status_t status = getLocalTime(&localTime); + reply->writeInt32(status); + if (status == OK) { + reply->writeInt64(localTime); + } + return OK; + } break; + + case GET_LOCAL_FREQ: { + CHECK_INTERFACE(ICommonClock, data, reply); + uint64_t freq; + status_t status = getLocalFreq(&freq); + reply->writeInt32(status); + if (status == OK) { + reply->writeInt64(freq); + } + return OK; + } break; + + case GET_ESTIMATED_ERROR: { + 
CHECK_INTERFACE(ICommonClock, data, reply); + int32_t error; + status_t status = getEstimatedError(&error); + reply->writeInt32(status); + if (status == OK) { + reply->writeInt32(error); + } + return OK; + } break; + + case GET_TIMELINE_ID: { + CHECK_INTERFACE(ICommonClock, data, reply); + uint64_t id; + status_t status = getTimelineID(&id); + reply->writeInt32(status); + if (status == OK) { + reply->writeInt64(static_cast<int64_t>(id)); + } + return OK; + } break; + + case GET_STATE: { + CHECK_INTERFACE(ICommonClock, data, reply); + State state; + status_t status = getState(&state); + reply->writeInt32(status); + if (status == OK) { + reply->writeInt32(static_cast<int32_t>(state)); + } + return OK; + } break; + + case GET_MASTER_ADDRESS: { + CHECK_INTERFACE(ICommonClock, data, reply); + struct sockaddr_storage addr; + status_t status = getMasterAddr(&addr); + + if ((status == OK) && !canSerializeSockaddr(&addr)) { + status = UNKNOWN_ERROR; + } + + reply->writeInt32(status); + + if (status == OK) { + serializeSockaddr(reply, &addr); + } + + return OK; + } break; + + case REGISTER_LISTENER: { + CHECK_INTERFACE(ICommonClock, data, reply); + sp<ICommonClockListener> listener = + interface_cast<ICommonClockListener>(data.readStrongBinder()); + status_t status = registerListener(listener); + reply->writeInt32(status); + return OK; + } break; + + case UNREGISTER_LISTENER: { + CHECK_INTERFACE(ICommonClock, data, reply); + sp<ICommonClockListener> listener = + interface_cast<ICommonClockListener>(data.readStrongBinder()); + status_t status = unregisterListener(listener); + reply->writeInt32(status); + return OK; + } break; + } + return BBinder::onTransact(code, data, reply, flags); +} + +/***** ICommonClockListener *****/ + +enum { + ON_TIMELINE_CHANGED = IBinder::FIRST_CALL_TRANSACTION, +}; + +class BpCommonClockListener : public BpInterface<ICommonClockListener> +{ + public: + BpCommonClockListener(const sp<IBinder>& impl) + : BpInterface<ICommonClockListener>(impl) {} + 
+ virtual void onTimelineChanged(uint64_t timelineID) { + Parcel data, reply; + data.writeInterfaceToken( + ICommonClockListener::getInterfaceDescriptor()); + data.writeInt64(timelineID); + remote()->transact(ON_TIMELINE_CHANGED, data, &reply); + } +}; + +IMPLEMENT_META_INTERFACE(CommonClockListener, + "android.os.ICommonClockListener"); + +status_t BnCommonClockListener::onTransact( + uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) { + switch(code) { + case ON_TIMELINE_CHANGED: { + CHECK_INTERFACE(ICommonClockListener, data, reply); + uint32_t timelineID = data.readInt64(); + onTimelineChanged(timelineID); + return NO_ERROR; + } break; + } + + return BBinder::onTransact(code, data, reply, flags); +} + +}; // namespace android diff --git a/media/common_time/ICommonTimeConfig.cpp b/media/common_time/ICommonTimeConfig.cpp new file mode 100644 index 0000000..8eb37cb --- /dev/null +++ b/media/common_time/ICommonTimeConfig.cpp @@ -0,0 +1,508 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include <linux/socket.h> + +#include <common_time/ICommonTimeConfig.h> +#include <binder/Parcel.h> + +#include "utils.h" + +namespace android { + +/***** ICommonTimeConfig *****/ + +enum { + GET_MASTER_ELECTION_PRIORITY = IBinder::FIRST_CALL_TRANSACTION, + SET_MASTER_ELECTION_PRIORITY, + GET_MASTER_ELECTION_ENDPOINT, + SET_MASTER_ELECTION_ENDPOINT, + GET_MASTER_ELECTION_GROUP_ID, + SET_MASTER_ELECTION_GROUP_ID, + GET_INTERFACE_BINDING, + SET_INTERFACE_BINDING, + GET_MASTER_ANNOUNCE_INTERVAL, + SET_MASTER_ANNOUNCE_INTERVAL, + GET_CLIENT_SYNC_INTERVAL, + SET_CLIENT_SYNC_INTERVAL, + GET_PANIC_THRESHOLD, + SET_PANIC_THRESHOLD, + GET_AUTO_DISABLE, + SET_AUTO_DISABLE, + FORCE_NETWORKLESS_MASTER_MODE, +}; + +const String16 ICommonTimeConfig::kServiceName("common_time.config"); + +class BpCommonTimeConfig : public BpInterface<ICommonTimeConfig> +{ + public: + BpCommonTimeConfig(const sp<IBinder>& impl) + : BpInterface<ICommonTimeConfig>(impl) {} + + virtual status_t getMasterElectionPriority(uint8_t *priority) { + Parcel data, reply; + data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor()); + status_t status = remote()->transact(GET_MASTER_ELECTION_PRIORITY, + data, + &reply); + if (status == OK) { + status = reply.readInt32(); + if (status == OK) { + *priority = static_cast<uint8_t>(reply.readInt32()); + } + } + + return status; + } + + virtual status_t setMasterElectionPriority(uint8_t priority) { + Parcel data, reply; + data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor()); + data.writeInt32(static_cast<int32_t>(priority)); + status_t status = remote()->transact(SET_MASTER_ELECTION_PRIORITY, + data, + &reply); + if (status == OK) { + status = reply.readInt32(); + } + + return status; + } + + virtual status_t getMasterElectionEndpoint(struct sockaddr_storage *addr) { + Parcel data, reply; + data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor()); + status_t status = remote()->transact(GET_MASTER_ELECTION_ENDPOINT, + 
data, + &reply); + if (status == OK) { + status = reply.readInt32(); + if (status == OK) { + deserializeSockaddr(&reply, addr); + } + } + + return status; + } + + virtual status_t setMasterElectionEndpoint( + const struct sockaddr_storage *addr) { + Parcel data, reply; + data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor()); + if (!canSerializeSockaddr(addr)) + return BAD_VALUE; + if (NULL == addr) { + data.writeInt32(0); + } else { + data.writeInt32(1); + serializeSockaddr(&data, addr); + } + status_t status = remote()->transact(SET_MASTER_ELECTION_ENDPOINT, + data, + &reply); + if (status == OK) { + status = reply.readInt32(); + } + + return status; + } + + virtual status_t getMasterElectionGroupId(uint64_t *id) { + Parcel data, reply; + data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor()); + status_t status = remote()->transact(GET_MASTER_ELECTION_GROUP_ID, + data, + &reply); + + if (status == OK) { + status = reply.readInt32(); + if (status == OK) { + *id = static_cast<uint64_t>(reply.readInt64()); + } + } + + return status; + } + + virtual status_t setMasterElectionGroupId(uint64_t id) { + Parcel data, reply; + data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor()); + data.writeInt64(id); + status_t status = remote()->transact(SET_MASTER_ELECTION_GROUP_ID, + data, + &reply); + + if (status == OK) { + status = reply.readInt32(); + } + + return status; + } + + virtual status_t getInterfaceBinding(String16& ifaceName) { + Parcel data, reply; + data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor()); + status_t status = remote()->transact(GET_INTERFACE_BINDING, + data, + &reply); + if (status == OK) { + status = reply.readInt32(); + if (status == OK) { + ifaceName = reply.readString16(); + } + } + + return status; + } + + virtual status_t setInterfaceBinding(const String16& ifaceName) { + Parcel data, reply; + data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor()); + 
data.writeString16(ifaceName); + status_t status = remote()->transact(SET_INTERFACE_BINDING, + data, + &reply); + if (status == OK) { + status = reply.readInt32(); + } + + return status; + } + + virtual status_t getMasterAnnounceInterval(int *interval) { + Parcel data, reply; + data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor()); + status_t status = remote()->transact(GET_MASTER_ANNOUNCE_INTERVAL, + data, + &reply); + if (status == OK) { + status = reply.readInt32(); + if (status == OK) { + *interval = reply.readInt32(); + } + } + + return status; + } + + virtual status_t setMasterAnnounceInterval(int interval) { + Parcel data, reply; + data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor()); + data.writeInt32(interval); + status_t status = remote()->transact(SET_MASTER_ANNOUNCE_INTERVAL, + data, + &reply); + if (status == OK) { + status = reply.readInt32(); + } + + return status; + } + + virtual status_t getClientSyncInterval(int *interval) { + Parcel data, reply; + data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor()); + status_t status = remote()->transact(GET_CLIENT_SYNC_INTERVAL, + data, + &reply); + if (status == OK) { + status = reply.readInt32(); + if (status == OK) { + *interval = reply.readInt32(); + } + } + + return status; + } + + virtual status_t setClientSyncInterval(int interval) { + Parcel data, reply; + data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor()); + data.writeInt32(interval); + status_t status = remote()->transact(SET_CLIENT_SYNC_INTERVAL, + data, + &reply); + if (status == OK) { + status = reply.readInt32(); + } + + return status; + } + + virtual status_t getPanicThreshold(int *threshold) { + Parcel data, reply; + data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor()); + status_t status = remote()->transact(GET_PANIC_THRESHOLD, + data, + &reply); + if (status == OK) { + status = reply.readInt32(); + if (status == OK) { + *threshold = reply.readInt32(); + } + } 
+ + return status; + } + + virtual status_t setPanicThreshold(int threshold) { + Parcel data, reply; + data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor()); + data.writeInt32(threshold); + status_t status = remote()->transact(SET_PANIC_THRESHOLD, + data, + &reply); + if (status == OK) { + status = reply.readInt32(); + } + + return status; + } + + virtual status_t getAutoDisable(bool *autoDisable) { + Parcel data, reply; + data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor()); + status_t status = remote()->transact(GET_AUTO_DISABLE, + data, + &reply); + if (status == OK) { + status = reply.readInt32(); + if (status == OK) { + *autoDisable = (0 != reply.readInt32()); + } + } + + return status; + } + + virtual status_t setAutoDisable(bool autoDisable) { + Parcel data, reply; + data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor()); + data.writeInt32(autoDisable ? 1 : 0); + status_t status = remote()->transact(SET_AUTO_DISABLE, + data, + &reply); + + if (status == OK) { + status = reply.readInt32(); + } + + return status; + } + + virtual status_t forceNetworklessMasterMode() { + Parcel data, reply; + data.writeInterfaceToken(ICommonTimeConfig::getInterfaceDescriptor()); + status_t status = remote()->transact(FORCE_NETWORKLESS_MASTER_MODE, + data, + &reply); + + if (status == OK) { + status = reply.readInt32(); + } + + return status; + } +}; + +IMPLEMENT_META_INTERFACE(CommonTimeConfig, "android.os.ICommonTimeConfig"); + +status_t BnCommonTimeConfig::onTransact(uint32_t code, + const Parcel& data, + Parcel* reply, + uint32_t flags) { + switch(code) { + case GET_MASTER_ELECTION_PRIORITY: { + CHECK_INTERFACE(ICommonTimeConfig, data, reply); + uint8_t priority; + status_t status = getMasterElectionPriority(&priority); + reply->writeInt32(status); + if (status == OK) { + reply->writeInt32(static_cast<int32_t>(priority)); + } + return OK; + } break; + + case SET_MASTER_ELECTION_PRIORITY: { + CHECK_INTERFACE(ICommonTimeConfig, 
data, reply); + uint8_t priority = static_cast<uint8_t>(data.readInt32()); + status_t status = setMasterElectionPriority(priority); + reply->writeInt32(status); + return OK; + } break; + + case GET_MASTER_ELECTION_ENDPOINT: { + CHECK_INTERFACE(ICommonTimeConfig, data, reply); + struct sockaddr_storage addr; + status_t status = getMasterElectionEndpoint(&addr); + + if ((status == OK) && !canSerializeSockaddr(&addr)) { + status = UNKNOWN_ERROR; + } + + reply->writeInt32(status); + + if (status == OK) { + serializeSockaddr(reply, &addr); + } + + return OK; + } break; + + case SET_MASTER_ELECTION_ENDPOINT: { + CHECK_INTERFACE(ICommonTimeConfig, data, reply); + struct sockaddr_storage addr; + int hasAddr = data.readInt32(); + + status_t status; + if (hasAddr) { + deserializeSockaddr(&data, &addr); + status = setMasterElectionEndpoint(&addr); + } else { + status = setMasterElectionEndpoint(&addr); + } + + reply->writeInt32(status); + return OK; + } break; + + case GET_MASTER_ELECTION_GROUP_ID: { + CHECK_INTERFACE(ICommonTimeConfig, data, reply); + uint64_t id; + status_t status = getMasterElectionGroupId(&id); + reply->writeInt32(status); + if (status == OK) { + reply->writeInt64(id); + } + return OK; + } break; + + case SET_MASTER_ELECTION_GROUP_ID: { + CHECK_INTERFACE(ICommonTimeConfig, data, reply); + uint64_t id = static_cast<uint64_t>(data.readInt64()); + status_t status = setMasterElectionGroupId(id); + reply->writeInt32(status); + return OK; + } break; + + case GET_INTERFACE_BINDING: { + CHECK_INTERFACE(ICommonTimeConfig, data, reply); + String16 ret; + status_t status = getInterfaceBinding(ret); + reply->writeInt32(status); + if (status == OK) { + reply->writeString16(ret); + } + return OK; + } break; + + case SET_INTERFACE_BINDING: { + CHECK_INTERFACE(ICommonTimeConfig, data, reply); + String16 ifaceName; + ifaceName = data.readString16(); + status_t status = setInterfaceBinding(ifaceName); + reply->writeInt32(status); + return OK; + } break; + + case 
GET_MASTER_ANNOUNCE_INTERVAL: { + CHECK_INTERFACE(ICommonTimeConfig, data, reply); + int interval; + status_t status = getMasterAnnounceInterval(&interval); + reply->writeInt32(status); + if (status == OK) { + reply->writeInt32(interval); + } + return OK; + } break; + + case SET_MASTER_ANNOUNCE_INTERVAL: { + CHECK_INTERFACE(ICommonTimeConfig, data, reply); + int interval = data.readInt32(); + status_t status = setMasterAnnounceInterval(interval); + reply->writeInt32(status); + return OK; + } break; + + case GET_CLIENT_SYNC_INTERVAL: { + CHECK_INTERFACE(ICommonTimeConfig, data, reply); + int interval; + status_t status = getClientSyncInterval(&interval); + reply->writeInt32(status); + if (status == OK) { + reply->writeInt32(interval); + } + return OK; + } break; + + case SET_CLIENT_SYNC_INTERVAL: { + CHECK_INTERFACE(ICommonTimeConfig, data, reply); + int interval = data.readInt32(); + status_t status = setClientSyncInterval(interval); + reply->writeInt32(status); + return OK; + } break; + + case GET_PANIC_THRESHOLD: { + CHECK_INTERFACE(ICommonTimeConfig, data, reply); + int threshold; + status_t status = getPanicThreshold(&threshold); + reply->writeInt32(status); + if (status == OK) { + reply->writeInt32(threshold); + } + return OK; + } break; + + case SET_PANIC_THRESHOLD: { + CHECK_INTERFACE(ICommonTimeConfig, data, reply); + int threshold = data.readInt32(); + status_t status = setPanicThreshold(threshold); + reply->writeInt32(status); + return OK; + } break; + + case GET_AUTO_DISABLE: { + CHECK_INTERFACE(ICommonTimeConfig, data, reply); + bool autoDisable; + status_t status = getAutoDisable(&autoDisable); + reply->writeInt32(status); + if (status == OK) { + reply->writeInt32(autoDisable ? 
1 : 0); + } + return OK; + } break; + + case SET_AUTO_DISABLE: { + CHECK_INTERFACE(ICommonTimeConfig, data, reply); + bool autoDisable = (0 != data.readInt32()); + status_t status = setAutoDisable(autoDisable); + reply->writeInt32(status); + return OK; + } break; + + case FORCE_NETWORKLESS_MASTER_MODE: { + CHECK_INTERFACE(ICommonTimeConfig, data, reply); + status_t status = forceNetworklessMasterMode(); + reply->writeInt32(status); + return OK; + } break; + } + return BBinder::onTransact(code, data, reply, flags); +} + +}; // namespace android + diff --git a/media/common_time/cc_helper.cpp b/media/common_time/cc_helper.cpp new file mode 100644 index 0000000..8d8556c --- /dev/null +++ b/media/common_time/cc_helper.cpp @@ -0,0 +1,129 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <stdint.h> + +#include <common_time/cc_helper.h> +#include <common_time/ICommonClock.h> +#include <utils/threads.h> + +namespace android { + +Mutex CCHelper::lock_; +sp<ICommonClock> CCHelper::common_clock_; +sp<ICommonClockListener> CCHelper::common_clock_listener_; +uint32_t CCHelper::ref_count_ = 0; + +bool CCHelper::verifyClock_l() { + bool ret = false; + + if (common_clock_ == NULL) { + common_clock_ = ICommonClock::getInstance(); + if (common_clock_ == NULL) + goto bailout; + } + + if (ref_count_ > 0) { + if (common_clock_listener_ == NULL) { + common_clock_listener_ = new CommonClockListener(); + if (common_clock_listener_ == NULL) + goto bailout; + + if (OK != common_clock_->registerListener(common_clock_listener_)) + goto bailout; + } + } + + ret = true; + +bailout: + if (!ret) { + common_clock_listener_ = NULL; + common_clock_ = NULL; + } + return ret; +} + +CCHelper::CCHelper() { + Mutex::Autolock lock(&lock_); + ref_count_++; + verifyClock_l(); +} + +CCHelper::~CCHelper() { + Mutex::Autolock lock(&lock_); + + assert(ref_count_ > 0); + ref_count_--; + + // If we were the last CCHelper instance in the system, and we had + // previously register a listener, unregister it now so that the common time + // service has the chance to go into auto-disabled mode. + if (!ref_count_ && + (common_clock_ != NULL) && + (common_clock_listener_ != NULL)) { + common_clock_->unregisterListener(common_clock_listener_); + common_clock_listener_ = NULL; + } +} + +void CCHelper::CommonClockListener::onTimelineChanged(uint64_t timelineID) { + // do nothing; listener is only really used as a token so the server can + // find out when clients die. +} + +// Helper methods which attempts to make calls to the common time binder +// service. 
If the first attempt fails with DEAD_OBJECT, the helpers will +// attempt to make a connection to the service again (assuming that the process +// hosting the service had crashed and the client proxy we are holding is dead) +// If the second attempt fails, or no connection can be made, the we let the +// error propagate up the stack and let the caller deal with the situation as +// best they can. +#define CCHELPER_METHOD(decl, call) \ + status_t CCHelper::decl { \ + Mutex::Autolock lock(&lock_); \ + \ + if (!verifyClock_l()) \ + return DEAD_OBJECT; \ + \ + status_t status = common_clock_->call; \ + if (DEAD_OBJECT == status) { \ + if (!verifyClock_l()) \ + return DEAD_OBJECT; \ + status = common_clock_->call; \ + } \ + \ + return status; \ + } + +#define VERIFY_CLOCK() + +CCHELPER_METHOD(isCommonTimeValid(bool* valid, uint32_t* timelineID), + isCommonTimeValid(valid, timelineID)) +CCHELPER_METHOD(commonTimeToLocalTime(int64_t commonTime, int64_t* localTime), + commonTimeToLocalTime(commonTime, localTime)) +CCHELPER_METHOD(localTimeToCommonTime(int64_t localTime, int64_t* commonTime), + localTimeToCommonTime(localTime, commonTime)) +CCHELPER_METHOD(getCommonTime(int64_t* commonTime), + getCommonTime(commonTime)) +CCHELPER_METHOD(getCommonFreq(uint64_t* freq), + getCommonFreq(freq)) +CCHELPER_METHOD(getLocalTime(int64_t* localTime), + getLocalTime(localTime)) +CCHELPER_METHOD(getLocalFreq(uint64_t* freq), + getLocalFreq(freq)) + +} // namespace android diff --git a/media/common_time/local_clock.cpp b/media/common_time/local_clock.cpp new file mode 100644 index 0000000..a7c61fc --- /dev/null +++ b/media/common_time/local_clock.cpp @@ -0,0 +1,92 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "common_time" +#include <utils/Log.h> + +#include <assert.h> +#include <stdint.h> + +#include <common_time/local_clock.h> +#include <hardware/hardware.h> +#include <hardware/local_time_hal.h> +#include <utils/Errors.h> +#include <utils/threads.h> + +namespace android { + +Mutex LocalClock::dev_lock_; +local_time_hw_device_t* LocalClock::dev_ = NULL; + +LocalClock::LocalClock() { + int res; + const hw_module_t* mod; + + AutoMutex lock(&dev_lock_); + + if (dev_ != NULL) + return; + + res = hw_get_module_by_class(LOCAL_TIME_HARDWARE_MODULE_ID, NULL, &mod); + if (res) { + ALOGE("Failed to open local time HAL module (res = %d)", res); + } else { + res = local_time_hw_device_open(mod, &dev_); + if (res) { + ALOGE("Failed to open local time HAL device (res = %d)", res); + dev_ = NULL; + } + } +} + +bool LocalClock::initCheck() { + return (NULL != dev_); +} + +int64_t LocalClock::getLocalTime() { + assert(NULL != dev_); + assert(NULL != dev_->get_local_time); + + return dev_->get_local_time(dev_); +} + +uint64_t LocalClock::getLocalFreq() { + assert(NULL != dev_); + assert(NULL != dev_->get_local_freq); + + return dev_->get_local_freq(dev_); +} + +status_t LocalClock::setLocalSlew(int16_t rate) { + assert(NULL != dev_); + + if (!dev_->set_local_slew) + return INVALID_OPERATION; + + return static_cast<status_t>(dev_->set_local_slew(dev_, rate)); +} + +int32_t LocalClock::getDebugLog(struct local_time_debug_event* records, + int max_records) { + assert(NULL != dev_); + + if (!dev_->get_debug_log) + return INVALID_OPERATION; + + return 
dev_->get_debug_log(dev_, records, max_records); +} + +} // namespace android diff --git a/media/common_time/utils.cpp b/media/common_time/utils.cpp new file mode 100644 index 0000000..6539171 --- /dev/null +++ b/media/common_time/utils.cpp @@ -0,0 +1,89 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <arpa/inet.h> +#include <linux/socket.h> + +#include <binder/Parcel.h> + +namespace android { + +bool canSerializeSockaddr(const struct sockaddr_storage* addr) { + switch (addr->ss_family) { + case AF_INET: + case AF_INET6: + return true; + default: + return false; + } +} + +void serializeSockaddr(Parcel* p, const struct sockaddr_storage* addr) { + switch (addr->ss_family) { + case AF_INET: { + const struct sockaddr_in* s = + reinterpret_cast<const struct sockaddr_in*>(addr); + p->writeInt32(AF_INET); + p->writeInt32(ntohl(s->sin_addr.s_addr)); + p->writeInt32(static_cast<int32_t>(ntohs(s->sin_port))); + } break; + + case AF_INET6: { + const struct sockaddr_in6* s = + reinterpret_cast<const struct sockaddr_in6*>(addr); + const int32_t* a = + reinterpret_cast<const int32_t*>(s->sin6_addr.s6_addr); + p->writeInt32(AF_INET6); + p->writeInt32(ntohl(a[0])); + p->writeInt32(ntohl(a[1])); + p->writeInt32(ntohl(a[2])); + p->writeInt32(ntohl(a[3])); + p->writeInt32(static_cast<int32_t>(ntohs(s->sin6_port))); + p->writeInt32(ntohl(s->sin6_flowinfo)); + p->writeInt32(ntohl(s->sin6_scope_id)); 
+ } break; + } +} + +void deserializeSockaddr(const Parcel* p, struct sockaddr_storage* addr) { + memset(addr, 0, sizeof(addr)); + + addr->ss_family = p->readInt32(); + switch(addr->ss_family) { + case AF_INET: { + struct sockaddr_in* s = + reinterpret_cast<struct sockaddr_in*>(addr); + s->sin_addr.s_addr = htonl(p->readInt32()); + s->sin_port = htons(static_cast<uint16_t>(p->readInt32())); + } break; + + case AF_INET6: { + struct sockaddr_in6* s = + reinterpret_cast<struct sockaddr_in6*>(addr); + int32_t* a = reinterpret_cast<int32_t*>(s->sin6_addr.s6_addr); + + a[0] = htonl(p->readInt32()); + a[1] = htonl(p->readInt32()); + a[2] = htonl(p->readInt32()); + a[3] = htonl(p->readInt32()); + s->sin6_port = htons(static_cast<uint16_t>(p->readInt32())); + s->sin6_flowinfo = htonl(p->readInt32()); + s->sin6_scope_id = htonl(p->readInt32()); + } break; + } +} + +} // namespace android diff --git a/media/common_time/utils.h b/media/common_time/utils.h new file mode 100644 index 0000000..ce79d0d --- /dev/null +++ b/media/common_time/utils.h @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_LIBCOMMONCLOCK_UTILS_H +#define ANDROID_LIBCOMMONCLOCK_UTILS_H + +#include <linux/socket.h> + +#include <binder/Parcel.h> +#include <utils/Errors.h> + +namespace android { + +extern bool canSerializeSockaddr(const struct sockaddr_storage* addr); +extern void serializeSockaddr(Parcel* p, const struct sockaddr_storage* addr); +extern status_t deserializeSockaddr(const Parcel* p, + struct sockaddr_storage* addr); + +}; // namespace android + +#endif // ANDROID_LIBCOMMONCLOCK_UTILS_H diff --git a/media/libaah_rtp/Android.mk b/media/libaah_rtp/Android.mk new file mode 100644 index 0000000..54fd9ec --- /dev/null +++ b/media/libaah_rtp/Android.mk @@ -0,0 +1,40 @@ +LOCAL_PATH:= $(call my-dir) +# +# libaah_rtp +# + +include $(CLEAR_VARS) + +LOCAL_MODULE := libaah_rtp +LOCAL_MODULE_TAGS := optional + +LOCAL_SRC_FILES := \ + aah_decoder_pump.cpp \ + aah_rx_player.cpp \ + aah_rx_player_core.cpp \ + aah_rx_player_ring_buffer.cpp \ + aah_rx_player_substream.cpp \ + aah_tx_packet.cpp \ + aah_tx_player.cpp \ + aah_tx_sender.cpp \ + pipe_event.cpp + +LOCAL_C_INCLUDES := \ + frameworks/base/include \ + frameworks/base/include/media/stagefright/openmax \ + frameworks/base/media \ + frameworks/base/media/libstagefright + +LOCAL_SHARED_LIBRARIES := \ + libcommon_time_client \ + libbinder \ + libmedia \ + libstagefright \ + libstagefright_foundation \ + libutils + +LOCAL_LDLIBS := \ + -lpthread + +include $(BUILD_SHARED_LIBRARY) + diff --git a/media/libaah_rtp/aah_decoder_pump.cpp b/media/libaah_rtp/aah_decoder_pump.cpp new file mode 100644 index 0000000..28b8c7b --- /dev/null +++ b/media/libaah_rtp/aah_decoder_pump.cpp @@ -0,0 +1,520 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "LibAAH_RTP" +//#define LOG_NDEBUG 0 +#include <utils/Log.h> + +#include <poll.h> +#include <pthread.h> + +#include <common_time/cc_helper.h> +#include <media/AudioSystem.h> +#include <media/AudioTrack.h> +#include <media/stagefright/foundation/ADebug.h> +#include <media/stagefright/MetaData.h> +#include <media/stagefright/OMXClient.h> +#include <media/stagefright/OMXCodec.h> +#include <media/stagefright/Utils.h> +#include <utils/Timers.h> +#include <utils/threads.h> + +#include "aah_decoder_pump.h" + +namespace android { + +static const long long kLongDecodeErrorThreshold = 1000000ll; +static const uint32_t kMaxLongErrorsBeforeFatal = 3; +static const uint32_t kMaxErrorsBeforeFatal = 60; + +AAH_DecoderPump::AAH_DecoderPump(OMXClient& omx) + : omx_(omx) + , thread_status_(OK) + , renderer_(NULL) + , last_queued_pts_valid_(false) + , last_queued_pts_(0) + , last_ts_transform_valid_(false) + , last_volume_(0xFF) { + thread_ = new ThreadWrapper(this); +} + +AAH_DecoderPump::~AAH_DecoderPump() { + shutdown(); +} + +status_t AAH_DecoderPump::initCheck() { + if (thread_ == NULL) { + ALOGE("Failed to allocate thread"); + return NO_MEMORY; + } + + return OK; +} + +status_t AAH_DecoderPump::queueForDecode(MediaBuffer* buf) { + if (NULL == buf) { + return BAD_VALUE; + } + + if (OK != thread_status_) { + return thread_status_; + } + + { // Explicit scope for AutoMutex pattern. 
+ AutoMutex lock(&thread_lock_); + in_queue_.push_back(buf); + } + + thread_cond_.signal(); + + return OK; +} + +void AAH_DecoderPump::queueToRenderer(MediaBuffer* decoded_sample) { + Mutex::Autolock lock(&render_lock_); + sp<MetaData> meta; + int64_t ts; + status_t res; + + // Fetch the metadata and make sure the sample has a timestamp. We + // cannot render samples which are missing PTSs. + meta = decoded_sample->meta_data(); + if ((meta == NULL) || (!meta->findInt64(kKeyTime, &ts))) { + ALOGV("Decoded sample missing timestamp, cannot render."); + CHECK(false); + } else { + // If we currently are not holding on to a renderer, go ahead and + // make one now. + if (NULL == renderer_) { + renderer_ = new TimedAudioTrack(); + if (NULL != renderer_) { + int frameCount; + AudioTrack::getMinFrameCount(&frameCount, + AUDIO_STREAM_DEFAULT, + static_cast<int>(format_sample_rate_)); + int ch_format = (format_channels_ == 1) + ? AUDIO_CHANNEL_OUT_MONO + : AUDIO_CHANNEL_OUT_STEREO; + + res = renderer_->set(AUDIO_STREAM_DEFAULT, + format_sample_rate_, + AUDIO_FORMAT_PCM_16_BIT, + ch_format, + frameCount); + if (res != OK) { + ALOGE("Failed to setup audio renderer. 
(res = %d)", res); + delete renderer_; + renderer_ = NULL; + } else { + CHECK(last_ts_transform_valid_); + + res = renderer_->setMediaTimeTransform( + last_ts_transform_, TimedAudioTrack::COMMON_TIME); + if (res != NO_ERROR) { + ALOGE("Failed to set media time transform on AudioTrack" + " (res = %d)", res); + delete renderer_; + renderer_ = NULL; + } else { + float volume = static_cast<float>(last_volume_) + / 255.0f; + if (renderer_->setVolume(volume, volume) != OK) { + ALOGW("%s: setVolume failed", __FUNCTION__); + } + + renderer_->start(); + } + } + } else { + ALOGE("Failed to allocate AudioTrack to use as a renderer."); + } + } + + if (NULL != renderer_) { + uint8_t* decoded_data = + reinterpret_cast<uint8_t*>(decoded_sample->data()); + uint32_t decoded_amt = decoded_sample->range_length(); + decoded_data += decoded_sample->range_offset(); + + sp<IMemory> pcm_payload; + res = renderer_->allocateTimedBuffer(decoded_amt, &pcm_payload); + if (res != OK) { + ALOGE("Failed to allocate %d byte audio track buffer." + " (res = %d)", decoded_amt, res); + } else { + memcpy(pcm_payload->pointer(), decoded_data, decoded_amt); + + res = renderer_->queueTimedBuffer(pcm_payload, ts); + if (res != OK) { + ALOGE("Failed to queue %d byte audio track buffer with" + " media PTS %lld. 
(res = %d)", decoded_amt, ts, res); + } else { + last_queued_pts_valid_ = true; + last_queued_pts_ = ts; + } + } + + } else { + ALOGE("No renderer, dropping audio payload."); + } + } +} + +void AAH_DecoderPump::stopAndCleanupRenderer() { + if (NULL == renderer_) { + return; + } + + renderer_->stop(); + delete renderer_; + renderer_ = NULL; +} + +void AAH_DecoderPump::setRenderTSTransform(const LinearTransform& trans) { + Mutex::Autolock lock(&render_lock_); + + if (last_ts_transform_valid_ && !memcmp(&trans, + &last_ts_transform_, + sizeof(trans))) { + return; + } + + last_ts_transform_ = trans; + last_ts_transform_valid_ = true; + + if (NULL != renderer_) { + status_t res = renderer_->setMediaTimeTransform( + last_ts_transform_, TimedAudioTrack::COMMON_TIME); + if (res != NO_ERROR) { + ALOGE("Failed to set media time transform on AudioTrack" + " (res = %d)", res); + } + } +} + +void AAH_DecoderPump::setRenderVolume(uint8_t volume) { + Mutex::Autolock lock(&render_lock_); + + if (volume == last_volume_) { + return; + } + + last_volume_ = volume; + if (renderer_ != NULL) { + float volume = static_cast<float>(last_volume_) / 255.0f; + if (renderer_->setVolume(volume, volume) != OK) { + ALOGW("%s: setVolume failed", __FUNCTION__); + } + } +} + +// isAboutToUnderflow is something of a hack used to figure out when it might be +// time to give up on trying to fill in a gap in the RTP sequence and simply +// move on with a discontinuity. If we had perfect knowledge of when we were +// going to underflow, it would not be a hack, but unfortunately we do not. +// Right now, we just take the PTS of the last sample queued, and check to see +// if its presentation time is within kAboutToUnderflowThreshold from now. If +// it is, then we say that we are about to underflow. This decision is based on +// two (possibly invalid) assumptions. +// +// 1) The transmitter is leading the clock by more than +// kAboutToUnderflowThreshold. 
+// 2) The delta between the PTS of the last sample queued and the next sample +// is less than the transmitter's clock lead amount. +// +// Right now, the default transmitter lead time is 1 second, which is a pretty +// large number and greater than the 50mSec that kAboutToUnderflowThreshold is +// currently set to. This should satisfy assumption #1 for now, but changes to +// the transmitter clock lead time could effect this. +// +// For non-sparse streams with a homogeneous sample rate (the vast majority of +// streams in the world), the delta between any two adjacent PTSs will always be +// the homogeneous sample period. It is very uncommon to see a sample period +// greater than the 1 second clock lead we are currently using, and you +// certainly will not see it in an MP3 file which should satisfy assumption #2. +// Sparse audio streams (where no audio is transmitted for long periods of +// silence) and extremely low framerate video stream (like an MPEG-2 slideshow +// or the video stream for a pay TV audio channel) are examples of streams which +// might violate assumption #2. +bool AAH_DecoderPump::isAboutToUnderflow(int64_t threshold) { + Mutex::Autolock lock(&render_lock_); + + // If we have never queued anything to the decoder, we really don't know if + // we are going to underflow or not. + if (!last_queued_pts_valid_ || !last_ts_transform_valid_) { + return false; + } + + // Don't have access to Common Time? If so, then things are Very Bad + // elsewhere in the system; it pretty much does not matter what we do here. + // Since we cannot really tell if we are about to underflow or not, its + // probably best to assume that we are not and proceed accordingly. + int64_t tt_now; + if (OK != cc_helper_.getCommonTime(&tt_now)) { + return false; + } + + // Transform from media time to common time. 
+ int64_t last_queued_pts_tt; + if (!last_ts_transform_.doForwardTransform(last_queued_pts_, + &last_queued_pts_tt)) { + return false; + } + + // Check to see if we are underflowing. + return ((tt_now + threshold - last_queued_pts_tt) > 0); +} + +void* AAH_DecoderPump::workThread() { + // No need to lock when accessing decoder_ from the thread. The + // implementation of init and shutdown ensure that other threads never touch + // decoder_ while the work thread is running. + CHECK(decoder_ != NULL); + CHECK(format_ != NULL); + + // Start the decoder and note its result code. If something goes horribly + // wrong, callers of queueForDecode and getOutput will be able to detect + // that the thread encountered a fatal error and shut down by examining + // thread_status_. + thread_status_ = decoder_->start(format_.get()); + if (OK != thread_status_) { + ALOGE("AAH_DecoderPump's work thread failed to start decoder" + " (res = %d)", thread_status_); + return NULL; + } + + DurationTimer decode_timer; + uint32_t consecutive_long_errors = 0; + uint32_t consecutive_errors = 0; + + while (!thread_->exitPending()) { + status_t res; + MediaBuffer* bufOut = NULL; + + decode_timer.start(); + res = decoder_->read(&bufOut); + decode_timer.stop(); + + if (res == INFO_FORMAT_CHANGED) { + // Format has changed. Destroy our current renderer so that a new + // one can be created during queueToRenderer with the proper format. + // + // TODO : In order to transition seamlessly, we should change this + // to put the old renderer in a queue to play out completely before + // we destroy it. We can still create a new renderer, the timed + // nature of the renderer should ensure a seamless splice. + stopAndCleanupRenderer(); + res = OK; + } + + // Try to be a little nuanced in our handling of actual decode errors. + // Errors could happen because of minor stream corruption or because of + // transient resource limitations. 
In these cases, we would rather drop + // a little bit of output and ride out the unpleasantness then throw up + // our hands and abort everything. + // + // OTOH - When things are really bad (like we have a non-transient + // resource or bookkeeping issue, or the stream being fed to us is just + // complete and total garbage) we really want to terminate playback and + // raise an error condition all the way up to the application level so + // they can deal with it. + // + // Unfortunately, the error codes returned by the decoder can be a + // little non-specific. For example, if an OMXCodec times out + // attempting to obtain an output buffer, the error we get back is a + // generic -1. Try to distinguish between this resource timeout error + // and ES corruption error by timing how long the decode operation + // takes. Maintain accounting for both errors and "long errors". If we + // get more than a certain number consecutive errors of either type, + // consider it fatal and shutdown (which will cause the error to + // propagate all of the way up to the application level). The threshold + // for "long errors" is deliberately much lower than that of normal + // decode errors, both because of how long they take to happen and + // because they generally indicate resource limitation errors which are + // unlikely to go away in pathologically bad cases (in contrast to + // stream corruption errors which might happen 20 times in a row and + // then be suddenly OK again) + if (res != OK) { + consecutive_errors++; + if (decode_timer.durationUsecs() >= kLongDecodeErrorThreshold) + consecutive_long_errors++; + + CHECK(NULL == bufOut); + + ALOGW("%s: Failed to decode data (res = %d)", + __PRETTY_FUNCTION__, res); + + if ((consecutive_errors >= kMaxErrorsBeforeFatal) || + (consecutive_long_errors >= kMaxLongErrorsBeforeFatal)) { + ALOGE("%s: Maximum decode error threshold has been reached." 
+ " There have been %d consecutive decode errors, and %d" + " consecutive decode operations which resulted in errors" + " and took more than %lld uSec to process. The last" + " decode operation took %lld uSec.", + __PRETTY_FUNCTION__, + consecutive_errors, consecutive_long_errors, + kLongDecodeErrorThreshold, decode_timer.durationUsecs()); + thread_status_ = res; + break; + } + + continue; + } + + if (NULL == bufOut) { + ALOGW("%s: Successful decode, but no buffer produced", + __PRETTY_FUNCTION__); + continue; + } + + // Successful decode (with actual output produced). Clear the error + // counters. + consecutive_errors = 0; + consecutive_long_errors = 0; + + queueToRenderer(bufOut); + bufOut->release(); + } + + decoder_->stop(); + stopAndCleanupRenderer(); + + return NULL; +} + +status_t AAH_DecoderPump::init(const sp<MetaData>& params) { + Mutex::Autolock lock(&init_lock_); + + if (decoder_ != NULL) { + // already inited + return OK; + } + + if (params == NULL) { + return BAD_VALUE; + } + + if (!params->findInt32(kKeyChannelCount, &format_channels_)) { + return BAD_VALUE; + } + + if (!params->findInt32(kKeySampleRate, &format_sample_rate_)) { + return BAD_VALUE; + } + + CHECK(OK == thread_status_); + CHECK(decoder_ == NULL); + + status_t ret_val = UNKNOWN_ERROR; + + // Cache the format and attempt to create the decoder. + format_ = params; + decoder_ = OMXCodec::Create( + omx_.interface(), // IOMX Handle + format_, // Metadata for substream (indicates codec) + false, // Make a decoder, not an encoder + sp<MediaSource>(this)); // We will be the source for this codec. + + if (decoder_ == NULL) { + ALOGE("Failed to allocate decoder in %s", __PRETTY_FUNCTION__); + goto bailout; + } + + // Fire up the pump thread. It will take care of starting and stopping the + // decoder. 
+ ret_val = thread_->run("aah_decode_pump", ANDROID_PRIORITY_AUDIO); + if (OK != ret_val) { + ALOGE("Failed to start work thread in %s (res = %d)", + __PRETTY_FUNCTION__, ret_val); + goto bailout; + } + +bailout: + if (OK != ret_val) { + decoder_ = NULL; + format_ = NULL; + } + + return OK; +} + +status_t AAH_DecoderPump::shutdown() { + Mutex::Autolock lock(&init_lock_); + return shutdown_l(); +} + +status_t AAH_DecoderPump::shutdown_l() { + thread_->requestExit(); + thread_cond_.signal(); + thread_->requestExitAndWait(); + + for (MBQueue::iterator iter = in_queue_.begin(); + iter != in_queue_.end(); + ++iter) { + (*iter)->release(); + } + in_queue_.clear(); + + last_queued_pts_valid_ = false; + last_ts_transform_valid_ = false; + last_volume_ = 0xFF; + thread_status_ = OK; + + decoder_ = NULL; + format_ = NULL; + + return OK; +} + +status_t AAH_DecoderPump::read(MediaBuffer **buffer, + const ReadOptions *options) { + if (!buffer) { + return BAD_VALUE; + } + + *buffer = NULL; + + // While its not time to shut down, and we have no data to process, wait. + AutoMutex lock(&thread_lock_); + while (!thread_->exitPending() && in_queue_.empty()) + thread_cond_.wait(thread_lock_); + + // At this point, if its not time to shutdown then we must have something to + // process. Go ahead and pop the front of the queue for processing. + if (!thread_->exitPending()) { + CHECK(!in_queue_.empty()); + + *buffer = *(in_queue_.begin()); + in_queue_.erase(in_queue_.begin()); + } + + // If we managed to get a buffer, then everything must be OK. If not, then + // we must be shutting down. + return (NULL == *buffer) ? 
INVALID_OPERATION : OK; +} + +AAH_DecoderPump::ThreadWrapper::ThreadWrapper(AAH_DecoderPump* owner) + : Thread(false /* canCallJava*/ ) + , owner_(owner) { +} + +bool AAH_DecoderPump::ThreadWrapper::threadLoop() { + CHECK(NULL != owner_); + owner_->workThread(); + return false; +} + +} // namespace android diff --git a/media/libaah_rtp/aah_decoder_pump.h b/media/libaah_rtp/aah_decoder_pump.h new file mode 100644 index 0000000..f5a6529 --- /dev/null +++ b/media/libaah_rtp/aah_decoder_pump.h @@ -0,0 +1,107 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __DECODER_PUMP_H__ +#define __DECODER_PUMP_H__ + +#include <pthread.h> + +#include <common_time/cc_helper.h> +#include <media/stagefright/MediaSource.h> +#include <utils/LinearTransform.h> +#include <utils/List.h> +#include <utils/threads.h> + +namespace android { + +class MetaData; +class OMXClient; +class TimedAudioTrack; + +class AAH_DecoderPump : public MediaSource { + public: + explicit AAH_DecoderPump(OMXClient& omx); + status_t initCheck(); + + status_t queueForDecode(MediaBuffer* buf); + + status_t init(const sp<MetaData>& params); + status_t shutdown(); + + void setRenderTSTransform(const LinearTransform& trans); + void setRenderVolume(uint8_t volume); + bool isAboutToUnderflow(int64_t threshold); + bool getStatus() const { return thread_status_; } + + // MediaSource methods + virtual status_t start(MetaData *params) { return OK; } + virtual sp<MetaData> getFormat() { return format_; } + virtual status_t stop() { return OK; } + virtual status_t read(MediaBuffer **buffer, + const ReadOptions *options); + + protected: + virtual ~AAH_DecoderPump(); + + private: + class ThreadWrapper : public Thread { + public: + friend class AAH_DecoderPump; + explicit ThreadWrapper(AAH_DecoderPump* owner); + + private: + virtual bool threadLoop(); + AAH_DecoderPump* owner_; + + DISALLOW_EVIL_CONSTRUCTORS(ThreadWrapper); + }; + + void* workThread(); + virtual status_t shutdown_l(); + void queueToRenderer(MediaBuffer* decoded_sample); + void stopAndCleanupRenderer(); + + sp<MetaData> format_; + int32_t format_channels_; + int32_t format_sample_rate_; + + sp<MediaSource> decoder_; + OMXClient& omx_; + Mutex init_lock_; + + sp<ThreadWrapper> thread_; + Condition thread_cond_; + Mutex thread_lock_; + status_t thread_status_; + + Mutex render_lock_; + TimedAudioTrack* renderer_; + bool last_queued_pts_valid_; + int64_t last_queued_pts_; + bool last_ts_transform_valid_; + LinearTransform last_ts_transform_; + uint8_t last_volume_; + CCHelper cc_helper_; + + // 
protected by the thread_lock_ + typedef List<MediaBuffer*> MBQueue; + MBQueue in_queue_; + + DISALLOW_EVIL_CONSTRUCTORS(AAH_DecoderPump); +}; + +} // namespace android +#endif // __DECODER_PUMP_H__ diff --git a/media/libaah_rtp/aah_rx_player.cpp b/media/libaah_rtp/aah_rx_player.cpp new file mode 100644 index 0000000..9dd79fd --- /dev/null +++ b/media/libaah_rtp/aah_rx_player.cpp @@ -0,0 +1,288 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "LibAAH_RTP" +//#define LOG_NDEBUG 0 + +#include <binder/IServiceManager.h> +#include <media/MediaPlayerInterface.h> +#include <utils/Log.h> + +#include "aah_rx_player.h" + +namespace android { + +const uint32_t AAH_RXPlayer::kRTPRingBufferSize = 1 << 10; + +sp<MediaPlayerBase> createAAH_RXPlayer() { + sp<MediaPlayerBase> ret = new AAH_RXPlayer(); + return ret; +} + +AAH_RXPlayer::AAH_RXPlayer() + : ring_buffer_(kRTPRingBufferSize) + , substreams_(NULL) { + thread_wrapper_ = new ThreadWrapper(*this); + + is_playing_ = false; + multicast_joined_ = false; + transmitter_known_ = false; + current_epoch_known_ = false; + data_source_set_ = false; + sock_fd_ = -1; + + substreams_.setCapacity(4); + + memset(&listen_addr_, 0, sizeof(listen_addr_)); + memset(&transmitter_addr_, 0, sizeof(transmitter_addr_)); + + fetchAudioFlinger(); +} + +AAH_RXPlayer::~AAH_RXPlayer() { + reset_l(); + CHECK(substreams_.size() == 0); + omx_.disconnect(); +} + +status_t AAH_RXPlayer::initCheck() { + if (thread_wrapper_ == NULL) { + ALOGE("Failed to allocate thread wrapper!"); + return NO_MEMORY; + } + + if (!ring_buffer_.initCheck()) { + ALOGE("Failed to allocate reassembly ring buffer!"); + return NO_MEMORY; + } + + // Check for the presense of the common time service by attempting to query + // for CommonTime's frequency. If we get an error back, we cannot talk to + // the service at all and should abort now. 
+ status_t res; + uint64_t freq; + res = cc_helper_.getCommonFreq(&freq); + if (OK != res) { + ALOGE("Failed to connect to common time service!"); + return res; + } + + return omx_.connect(); +} + +status_t AAH_RXPlayer::setDataSource( + const char *url, + const KeyedVector<String8, String8> *headers) { + AutoMutex api_lock(&api_lock_); + uint32_t a, b, c, d; + uint16_t port; + + if (data_source_set_) { + return INVALID_OPERATION; + } + + if (NULL == url) { + return BAD_VALUE; + } + + if (5 != sscanf(url, "%*[^:/]://%u.%u.%u.%u:%hu", &a, &b, &c, &d, &port)) { + ALOGE("Failed to parse URL \"%s\"", url); + return BAD_VALUE; + } + + if ((a > 255) || (b > 255) || (c > 255) || (d > 255) || (port == 0)) { + ALOGE("Bad multicast address \"%s\"", url); + return BAD_VALUE; + } + + ALOGI("setDataSource :: %u.%u.%u.%u:%hu", a, b, c, d, port); + + a = (a << 24) | (b << 16) | (c << 8) | d; + + memset(&listen_addr_, 0, sizeof(listen_addr_)); + listen_addr_.sin_family = AF_INET; + listen_addr_.sin_port = htons(port); + listen_addr_.sin_addr.s_addr = htonl(a); + data_source_set_ = true; + + return OK; +} + +status_t AAH_RXPlayer::setDataSource(int fd, int64_t offset, int64_t length) { + return INVALID_OPERATION; +} + +status_t AAH_RXPlayer::setVideoSurface(const sp<Surface>& surface) { + return OK; +} + +status_t AAH_RXPlayer::setVideoSurfaceTexture( + const sp<ISurfaceTexture>& surfaceTexture) { + return OK; +} + +status_t AAH_RXPlayer::prepare() { + return OK; +} + +status_t AAH_RXPlayer::prepareAsync() { + sendEvent(MEDIA_PREPARED); + return OK; +} + +status_t AAH_RXPlayer::start() { + AutoMutex api_lock(&api_lock_); + + if (is_playing_) { + return OK; + } + + status_t res = startWorkThread(); + is_playing_ = (res == OK); + return res; +} + +status_t AAH_RXPlayer::stop() { + return pause(); +} + +status_t AAH_RXPlayer::pause() { + AutoMutex api_lock(&api_lock_); + stopWorkThread(); + CHECK(sock_fd_ < 0); + is_playing_ = false; + return OK; +} + +bool AAH_RXPlayer::isPlaying() { 
+ AutoMutex api_lock(&api_lock_); + return is_playing_; +} + +status_t AAH_RXPlayer::seekTo(int msec) { + sendEvent(MEDIA_SEEK_COMPLETE); + return OK; +} + +status_t AAH_RXPlayer::getCurrentPosition(int *msec) { + if (NULL != msec) { + *msec = 0; + } + return OK; +} + +status_t AAH_RXPlayer::getDuration(int *msec) { + if (NULL != msec) { + *msec = 1; + } + return OK; +} + +status_t AAH_RXPlayer::reset() { + AutoMutex api_lock(&api_lock_); + reset_l(); + return OK; +} + +void AAH_RXPlayer::reset_l() { + stopWorkThread(); + CHECK(sock_fd_ < 0); + CHECK(!multicast_joined_); + is_playing_ = false; + data_source_set_ = false; + transmitter_known_ = false; + memset(&listen_addr_, 0, sizeof(listen_addr_)); +} + +status_t AAH_RXPlayer::setLooping(int loop) { + return OK; +} + +player_type AAH_RXPlayer::playerType() { + return AAH_RX_PLAYER; +} + +status_t AAH_RXPlayer::setParameter(int key, const Parcel &request) { + return ERROR_UNSUPPORTED; +} + +status_t AAH_RXPlayer::getParameter(int key, Parcel *reply) { + return ERROR_UNSUPPORTED; +} + +status_t AAH_RXPlayer::invoke(const Parcel& request, Parcel *reply) { + if (!reply) { + return BAD_VALUE; + } + + int32_t magic; + status_t err = request.readInt32(&magic); + if (err != OK) { + reply->writeInt32(err); + return OK; + } + + if (magic != 0x12345) { + reply->writeInt32(BAD_VALUE); + return OK; + } + + int32_t methodID; + err = request.readInt32(&methodID); + if (err != OK) { + reply->writeInt32(err); + return OK; + } + + switch (methodID) { + // Get Volume + case INVOKE_GET_MASTER_VOLUME: { + if (audio_flinger_ != NULL) { + reply->writeInt32(OK); + reply->writeFloat(audio_flinger_->masterVolume()); + } else { + reply->writeInt32(UNKNOWN_ERROR); + } + } break; + + // Set Volume + case INVOKE_SET_MASTER_VOLUME: { + float targetVol = request.readFloat(); + reply->writeInt32(audio_flinger_->setMasterVolume(targetVol)); + } break; + + default: return BAD_VALUE; + } + + return OK; +} + +void AAH_RXPlayer::fetchAudioFlinger() { 
+ if (audio_flinger_ == NULL) { + sp<IServiceManager> sm = defaultServiceManager(); + sp<IBinder> binder; + binder = sm->getService(String16("media.audio_flinger")); + + if (binder == NULL) { + ALOGW("AAH_RXPlayer failed to fetch handle to audio flinger." + " Master volume control will not be possible."); + } + + audio_flinger_ = interface_cast<IAudioFlinger>(binder); + } +} + +} // namespace android diff --git a/media/libaah_rtp/aah_rx_player.h b/media/libaah_rtp/aah_rx_player.h new file mode 100644 index 0000000..ba5617e --- /dev/null +++ b/media/libaah_rtp/aah_rx_player.h @@ -0,0 +1,318 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __AAH_RX_PLAYER_H__ +#define __AAH_RX_PLAYER_H__ + +#include <common_time/cc_helper.h> +#include <media/MediaPlayerInterface.h> +#include <media/stagefright/foundation/ADebug.h> +#include <media/stagefright/MediaBuffer.h> +#include <media/stagefright/MediaSource.h> +#include <media/stagefright/MetaData.h> +#include <media/stagefright/OMXClient.h> +#include <netinet/in.h> +#include <utils/KeyedVector.h> +#include <utils/LinearTransform.h> +#include <utils/threads.h> + +#include "aah_decoder_pump.h" +#include "pipe_event.h" + +namespace android { + +class AAH_RXPlayer : public MediaPlayerInterface { + public: + AAH_RXPlayer(); + + virtual status_t initCheck(); + virtual status_t setDataSource(const char *url, + const KeyedVector<String8, String8>* + headers); + virtual status_t setDataSource(int fd, int64_t offset, int64_t length); + virtual status_t setVideoSurface(const sp<Surface>& surface); + virtual status_t setVideoSurfaceTexture(const sp<ISurfaceTexture>& + surfaceTexture); + virtual status_t prepare(); + virtual status_t prepareAsync(); + virtual status_t start(); + virtual status_t stop(); + virtual status_t pause(); + virtual bool isPlaying(); + virtual status_t seekTo(int msec); + virtual status_t getCurrentPosition(int *msec); + virtual status_t getDuration(int *msec); + virtual status_t reset(); + virtual status_t setLooping(int loop); + virtual player_type playerType(); + virtual status_t setParameter(int key, const Parcel &request); + virtual status_t getParameter(int key, Parcel *reply); + virtual status_t invoke(const Parcel& request, Parcel *reply); + + protected: + virtual ~AAH_RXPlayer(); + + private: + class ThreadWrapper : public Thread { + public: + friend class AAH_RXPlayer; + explicit ThreadWrapper(AAH_RXPlayer& player) + : Thread(false /* canCallJava */ ) + , player_(player) { } + + virtual bool threadLoop() { return player_.threadLoop(); } + + private: + AAH_RXPlayer& player_; + + DISALLOW_EVIL_CONSTRUCTORS(ThreadWrapper); + 
}; + +#pragma pack(push, 1) + // PacketBuffers are structures used by the RX ring buffer. The ring buffer + // is a ring of pointers to PacketBuffer structures which act as variable + // length byte arrays and hold the contents of received UDP packets. Rather + // than make this a structure which hold a length and a pointer to another + // allocated structure (which would require two allocations), this struct + // uses a structure overlay pattern where allocation for the byte array + // consists of allocating (arrayLen + sizeof(ssize_t)) bytes of data from + // whatever pool/heap the packet buffer pulls from, and then overlaying the + // packed PacketBuffer structure on top of the allocation. The one-byte + // array at the end of the structure serves as an offset to the the data + // portion of the allocation; packet buffers are never allocated on the + // stack or using the new operator. Instead, the static allocate-byte-array + // and destroy methods handle the allocate and overlay pattern. They also + // allow for a potential future optimization where instead of just + // allocating blocks from the process global heap and overlaying, the + // allocator is replaced with a different implementation (private heap, + // free-list, circular buffer, etc) which reduces potential heap + // fragmentation issues which might arise from the frequent allocation and + // destruction of the received UDP traffic. + struct PacketBuffer { + ssize_t length_; + uint8_t data_[1]; + + // TODO : consider changing this to be some form of ring buffer or free + // pool system instead of just using the heap in order to avoid heap + // fragmentation. + static PacketBuffer* allocate(ssize_t length); + static void destroy(PacketBuffer* pb); + + private: + // Force people to use allocate/destroy instead of new/delete. 
+ PacketBuffer() { } + ~PacketBuffer() { } + }; + + struct RetransRequest { + uint32_t magic_; + uint32_t mcast_ip_; + uint16_t mcast_port_; + uint16_t start_seq_; + uint16_t end_seq_; + }; +#pragma pack(pop) + + enum GapStatus { + kGS_NoGap = 0, + kGS_NormalGap, + kGS_FastStartGap, + }; + + struct SeqNoGap { + uint16_t start_seq_; + uint16_t end_seq_; + }; + + class RXRingBuffer { + public: + explicit RXRingBuffer(uint32_t capacity); + ~RXRingBuffer(); + + bool initCheck() const { return (ring_ != NULL); } + void reset(); + + // Push a packet buffer with a given sequence number into the ring + // buffer. pushBuffer will always consume the buffer pushed to it, + // either destroying it because it was a duplicate or overflow, or + // holding on to it in the ring. Callers should not hold any references + // to PacketBuffers after they have been pushed to the ring. Returns + // false in the case of a serious error (such as ring overflow). + // Callers should consider resetting the pipeline entirely in the event + // of a serious error. + bool pushBuffer(PacketBuffer* buf, uint16_t seq); + + // Fetch the next buffer in the RTP sequence. Returns NULL if there is + // no buffer to fetch. If a non-NULL PacketBuffer is returned, + // is_discon will be set to indicate whether or not this PacketBuffer is + // discontiuous with any previously returned packet buffers. Packet + // buffers returned by fetchBuffer are the caller's responsibility; they + // must be certain to destroy the buffers when they are done. + PacketBuffer* fetchBuffer(bool* is_discon); + + // Returns true and fills out the gap structure if the read pointer of + // the ring buffer is currently pointing to a gap which would stall a + // fetchBuffer operation. Returns false if the read pointer is not + // pointing to a gap in the sequence currently. + GapStatus fetchCurrentGap(SeqNoGap* gap); + + // Causes the read pointer to skip over any portion of a gap indicated + // by nak. 
If nak is NULL, any gap currently blocking the read pointer + // will be completely skipped. If any portion of a gap is skipped, the + // next successful read from fetch buffer will indicate a discontinuity. + void processNAK(const SeqNoGap* nak = NULL); + + // Compute the number of milliseconds until the inactivity timer for + // this RTP stream. Returns -1 if there is no active timeout, or 0 if + // the system has already timed out. + int computeInactivityTimeout(); + + private: + Mutex lock_; + PacketBuffer** ring_; + uint32_t capacity_; + uint32_t rd_; + uint32_t wr_; + + uint16_t rd_seq_; + bool rd_seq_known_; + bool waiting_for_fast_start_; + bool fetched_first_packet_; + + uint64_t rtp_activity_timeout_; + bool rtp_activity_timeout_valid_; + + DISALLOW_EVIL_CONSTRUCTORS(RXRingBuffer); + }; + + class Substream : public virtual RefBase { + public: + Substream(uint32_t ssrc, OMXClient& omx); + + void cleanupBufferInProgress(); + void shutdown(); + void processPayloadStart(uint8_t* buf, + uint32_t amt, + int32_t ts_lower); + void processPayloadCont (uint8_t* buf, + uint32_t amt); + void processTSTransform(const LinearTransform& trans); + + bool isAboutToUnderflow(); + uint32_t getSSRC() const { return ssrc_; } + uint16_t getProgramID() const { return (ssrc_ >> 5) & 0x1F; } + status_t getStatus() const { return status_; } + + protected: + virtual ~Substream(); + + private: + void cleanupDecoder(); + bool shouldAbort(const char* log_tag); + void processCompletedBuffer(); + bool setupSubstreamMeta(); + bool setupMP3SubstreamMeta(); + bool setupAACSubstreamMeta(); + bool setupSubstreamType(uint8_t substream_type, + uint8_t codec_type); + + uint32_t ssrc_; + bool waiting_for_rap_; + status_t status_; + + bool substream_details_known_; + uint8_t substream_type_; + uint8_t codec_type_; + const char* codec_mime_type_; + sp<MetaData> substream_meta_; + + MediaBuffer* buffer_in_progress_; + uint32_t expected_buffer_size_; + uint32_t buffer_filled_; + + Vector<uint8_t> 
aux_data_in_progress_; + uint32_t aux_data_expected_size_; + + sp<AAH_DecoderPump> decoder_; + + static int64_t kAboutToUnderflowThreshold; + + DISALLOW_EVIL_CONSTRUCTORS(Substream); + }; + + typedef DefaultKeyedVector< uint32_t, sp<Substream> > SubstreamVec; + + status_t startWorkThread(); + void stopWorkThread(); + virtual bool threadLoop(); + bool setupSocket(); + void cleanupSocket(); + void resetPipeline(); + void reset_l(); + bool processRX(PacketBuffer* pb); + void processRingBuffer(); + void processCommandPacket(PacketBuffer* pb); + bool processGaps(); + int computeNextGapRetransmitTimeout(); + void fetchAudioFlinger(); + + PipeEvent wakeup_work_thread_evt_; + sp<ThreadWrapper> thread_wrapper_; + Mutex api_lock_; + bool is_playing_; + bool data_source_set_; + + struct sockaddr_in listen_addr_; + int sock_fd_; + bool multicast_joined_; + + struct sockaddr_in transmitter_addr_; + bool transmitter_known_; + + uint32_t current_epoch_; + bool current_epoch_known_; + + SeqNoGap current_gap_; + GapStatus current_gap_status_; + uint64_t next_retrans_req_time_; + + RXRingBuffer ring_buffer_; + SubstreamVec substreams_; + OMXClient omx_; + CCHelper cc_helper_; + + // Connection to audio flinger used to hack a path to setMasterVolume. 
+ sp<IAudioFlinger> audio_flinger_; + + static const uint32_t kRTPRingBufferSize; + static const uint32_t kRetransRequestMagic; + static const uint32_t kFastStartRequestMagic; + static const uint32_t kRetransNAKMagic; + static const uint32_t kGapRerequestTimeoutUSec; + static const uint32_t kFastStartTimeoutUSec; + static const uint32_t kRTPActivityTimeoutUSec; + + static const uint32_t INVOKE_GET_MASTER_VOLUME = 3; + static const uint32_t INVOKE_SET_MASTER_VOLUME = 4; + + static uint64_t monotonicUSecNow(); + + DISALLOW_EVIL_CONSTRUCTORS(AAH_RXPlayer); +}; + +} // namespace android + +#endif // __AAH_RX_PLAYER_H__ diff --git a/media/libaah_rtp/aah_rx_player_core.cpp b/media/libaah_rtp/aah_rx_player_core.cpp new file mode 100644 index 0000000..d6b31fd --- /dev/null +++ b/media/libaah_rtp/aah_rx_player_core.cpp @@ -0,0 +1,809 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "LibAAH_RTP" +//#define LOG_NDEBUG 0 +#include <utils/Log.h> + +#include <fcntl.h> +#include <poll.h> +#include <sys/socket.h> +#include <time.h> +#include <utils/misc.h> + +#include <media/stagefright/Utils.h> + +#include "aah_rx_player.h" +#include "aah_tx_packet.h" + +namespace android { + +const uint32_t AAH_RXPlayer::kRetransRequestMagic = + FOURCC('T','r','e','q'); +const uint32_t AAH_RXPlayer::kRetransNAKMagic = + FOURCC('T','n','a','k'); +const uint32_t AAH_RXPlayer::kFastStartRequestMagic = + FOURCC('T','f','s','t'); +const uint32_t AAH_RXPlayer::kGapRerequestTimeoutUSec = 75000; +const uint32_t AAH_RXPlayer::kFastStartTimeoutUSec = 800000; +const uint32_t AAH_RXPlayer::kRTPActivityTimeoutUSec = 10000000; + +static inline int16_t fetchInt16(uint8_t* data) { + return static_cast<int16_t>(U16_AT(data)); +} + +static inline int32_t fetchInt32(uint8_t* data) { + return static_cast<int32_t>(U32_AT(data)); +} + +static inline int64_t fetchInt64(uint8_t* data) { + return static_cast<int64_t>(U64_AT(data)); +} + +uint64_t AAH_RXPlayer::monotonicUSecNow() { + struct timespec now; + int res = clock_gettime(CLOCK_MONOTONIC, &now); + CHECK(res >= 0); + + uint64_t ret = static_cast<uint64_t>(now.tv_sec) * 1000000; + ret += now.tv_nsec / 1000; + + return ret; +} + +status_t AAH_RXPlayer::startWorkThread() { + status_t res; + stopWorkThread(); + res = thread_wrapper_->run("TRX_Player", PRIORITY_AUDIO); + + if (res != OK) { + ALOGE("Failed to start work thread (res = %d)", res); + } + + return res; +} + +void AAH_RXPlayer::stopWorkThread() { + thread_wrapper_->requestExit(); // set the exit pending flag + wakeup_work_thread_evt_.setEvent(); + + status_t res; + res = thread_wrapper_->requestExitAndWait(); // block until thread exit. 
+ if (res != OK) {
+ ALOGE("Failed to stop work thread (res = %d)", res);
+ }
+
+ wakeup_work_thread_evt_.clearPendingEvents();
+}
+
+void AAH_RXPlayer::cleanupSocket() {
+ if (sock_fd_ >= 0) {
+ if (multicast_joined_) {
+ int res;
+ struct ip_mreq mreq;
+ mreq.imr_multiaddr = listen_addr_.sin_addr;
+ mreq.imr_interface.s_addr = htonl(INADDR_ANY);
+ res = setsockopt(sock_fd_,
+ IPPROTO_IP,
+ IP_DROP_MEMBERSHIP,
+ &mreq, sizeof(mreq));
+ if (res < 0) {
+ ALOGW("Failed to leave multicast group. (%d, %d)", res, errno);
+ }
+ multicast_joined_ = false;
+ }
+
+ close(sock_fd_);
+ sock_fd_ = -1;
+ }
+
+ resetPipeline();
+}
+
+void AAH_RXPlayer::resetPipeline() {
+ ring_buffer_.reset();
+
+ // Explicitly shut down all of the active substreams, then clear out the
+ // collection. Failure to clear out a substream can result in its decoder
+ // holding a reference to itself and therefore not going away when the
+ // collection is cleared.
+ for (size_t i = 0; i < substreams_.size(); ++i)
+ substreams_.valueAt(i)->shutdown();
+
+ substreams_.clear();
+
+ current_gap_status_ = kGS_NoGap;
+}
+
+bool AAH_RXPlayer::setupSocket() {
+ long flags;
+ int res, buf_size;
+ socklen_t opt_size;
+
+ cleanupSocket();
+ CHECK(sock_fd_ < 0);
+
+ // Make the socket
+ sock_fd_ = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
+ if (sock_fd_ < 0) {
+ ALOGE("Failed to create listen socket (errno %d)", errno);
+ goto bailout;
+ }
+
+ // Set non-blocking operation
+ flags = fcntl(sock_fd_, F_GETFL);
+ res = fcntl(sock_fd_, F_SETFL, flags | O_NONBLOCK);
+ if (res < 0) {
+ ALOGE("Failed to set socket (%d) to non-blocking mode (errno %d)",
+ sock_fd_, errno);
+ goto bailout;
+ }
+
+ // Bind to our port
+ struct sockaddr_in bind_addr;
+ memset(&bind_addr, 0, sizeof(bind_addr));
+ bind_addr.sin_family = AF_INET;
+ bind_addr.sin_addr.s_addr = INADDR_ANY;
+ bind_addr.sin_port = listen_addr_.sin_port;
+ res = bind(sock_fd_,
+ reinterpret_cast<const sockaddr*>(&bind_addr),
+ sizeof(bind_addr));
+ if (res < 0)
{ + uint32_t a = ntohl(bind_addr.sin_addr.s_addr); + uint16_t p = ntohs(bind_addr.sin_port); + ALOGE("Failed to bind socket (%d) to %d.%d.%d.%d:%hd. (errno %d)", + sock_fd_, + (a >> 24) & 0xFF, + (a >> 16) & 0xFF, + (a >> 8) & 0xFF, + (a ) & 0xFF, + p, + errno); + + goto bailout; + } + + buf_size = 1 << 16; // 64k + res = setsockopt(sock_fd_, + SOL_SOCKET, SO_RCVBUF, + &buf_size, sizeof(buf_size)); + if (res < 0) { + ALOGW("Failed to increase socket buffer size to %d. (errno %d)", + buf_size, errno); + } + + buf_size = 0; + opt_size = sizeof(buf_size); + res = getsockopt(sock_fd_, + SOL_SOCKET, SO_RCVBUF, + &buf_size, &opt_size); + if (res < 0) { + ALOGW("Failed to fetch socket buffer size. (errno %d)", errno); + } else { + ALOGI("RX socket buffer size is now %d bytes", buf_size); + } + + if (listen_addr_.sin_addr.s_addr) { + // Join the multicast group and we should be good to go. + struct ip_mreq mreq; + mreq.imr_multiaddr = listen_addr_.sin_addr; + mreq.imr_interface.s_addr = htonl(INADDR_ANY); + res = setsockopt(sock_fd_, + IPPROTO_IP, + IP_ADD_MEMBERSHIP, + &mreq, sizeof(mreq)); + if (res < 0) { + ALOGE("Failed to join multicast group. (errno %d)", errno); + goto bailout; + } + multicast_joined_ = true; + } + + return true; + +bailout: + cleanupSocket(); + return false; +} + +bool AAH_RXPlayer::threadLoop() { + struct pollfd poll_fds[2]; + bool process_more_right_now = false; + + if (!setupSocket()) { + sendEvent(MEDIA_ERROR); + goto bailout; + } + + while (!thread_wrapper_->exitPending()) { + // Step 1: Wait until there is something to do. 
+ int gap_timeout = computeNextGapRetransmitTimeout(); + int ring_timeout = ring_buffer_.computeInactivityTimeout(); + int timeout = -1; + + if (!ring_timeout) { + ALOGW("RTP inactivity timeout reached, resetting pipeline."); + resetPipeline(); + timeout = gap_timeout; + } else { + if (gap_timeout < 0) { + timeout = ring_timeout; + } else if (ring_timeout < 0) { + timeout = gap_timeout; + } else { + timeout = (gap_timeout < ring_timeout) ? gap_timeout + : ring_timeout; + } + } + + if ((0 != timeout) && (!process_more_right_now)) { + // Set up the events to wait on. Start with the wakeup pipe. + memset(&poll_fds, 0, sizeof(poll_fds)); + poll_fds[0].fd = wakeup_work_thread_evt_.getWakeupHandle(); + poll_fds[0].events = POLLIN; + + // Add the RX socket. + poll_fds[1].fd = sock_fd_; + poll_fds[1].events = POLLIN; + + // Wait for something interesing to happen. + int poll_res = poll(poll_fds, NELEM(poll_fds), timeout); + if (poll_res < 0) { + ALOGE("Fatal error (%d,%d) while waiting on events", + poll_res, errno); + sendEvent(MEDIA_ERROR); + goto bailout; + } + } + + if (thread_wrapper_->exitPending()) { + break; + } + + wakeup_work_thread_evt_.clearPendingEvents(); + process_more_right_now = false; + + // Step 2: Do we have data waiting in the socket? If so, drain the + // socket moving valid RTP information into the ring buffer to be + // processed. + if (poll_fds[1].revents) { + struct sockaddr_in from; + socklen_t from_len; + + ssize_t res = 0; + while (!thread_wrapper_->exitPending()) { + // Check the size of any pending packet. + res = recv(sock_fd_, NULL, 0, MSG_PEEK | MSG_TRUNC); + + // Error? + if (res < 0) { + // If the error is anything other than would block, + // something has gone very wrong. + if ((errno != EAGAIN) && (errno != EWOULDBLOCK)) { + ALOGE("Fatal socket error during recvfrom (%d, %d)", + (int)res, errno); + goto bailout; + } + + // Socket is out of data, just break out of processing and + // wait for more. 
+ break; + } + + // Allocate a payload. + PacketBuffer* pb = PacketBuffer::allocate(res); + if (NULL == pb) { + ALOGE("Fatal error, failed to allocate packet buffer of" + " length %u", static_cast<uint32_t>(res)); + goto bailout; + } + + // Fetch the data. + from_len = sizeof(from); + res = recvfrom(sock_fd_, pb->data_, pb->length_, 0, + reinterpret_cast<struct sockaddr*>(&from), + &from_len); + if (res != pb->length_) { + ALOGE("Fatal error, fetched packet length (%d) does not" + " match peeked packet length (%u). This should never" + " happen. (errno = %d)", + static_cast<int>(res), + static_cast<uint32_t>(pb->length_), + errno); + } + + bool drop_packet = false; + if (transmitter_known_) { + if (from.sin_addr.s_addr != + transmitter_addr_.sin_addr.s_addr) { + uint32_t a = ntohl(from.sin_addr.s_addr); + uint16_t p = ntohs(from.sin_port); + ALOGV("Dropping packet from unknown transmitter" + " %u.%u.%u.%u:%hu", + ((a >> 24) & 0xFF), + ((a >> 16) & 0xFF), + ((a >> 8) & 0xFF), + ( a & 0xFF), + p); + + drop_packet = true; + } else { + transmitter_addr_.sin_port = from.sin_port; + } + } else { + memcpy(&transmitter_addr_, &from, sizeof(from)); + transmitter_known_ = true; + } + + if (!drop_packet) { + bool serious_error = !processRX(pb); + + if (serious_error) { + // Something went "seriously wrong". Currently, the + // only trigger for this should be a ring buffer + // overflow. The current failsafe behavior for when + // something goes seriously wrong is to just reset the + // pipeline. The system should behave as if this + // AAH_RXPlayer was just set up for the first time. + ALOGE("Something just went seriously wrong with the" + " pipeline. Resetting."); + resetPipeline(); + } + } else { + PacketBuffer::destroy(pb); + } + } + } + + // Step 3: Process any data we mave have accumulated in the ring buffer + // so far. 
+ if (!thread_wrapper_->exitPending()) { + processRingBuffer(); + } + + // Step 4: At this point in time, the ring buffer should either be + // empty, or stalled in front of a gap caused by some dropped packets. + // Check on the current gap situation and deal with it in an appropriate + // fashion. If processGaps returns true, it means that it has given up + // on a gap and that we should try to process some more data + // immediately. + if (!thread_wrapper_->exitPending()) { + process_more_right_now = processGaps(); + } + + // Step 5: Check for fatal errors. If any of our substreams has + // encountered a fatal, unrecoverable, error, then propagate the error + // up to user level and shut down. + for (size_t i = 0; i < substreams_.size(); ++i) { + status_t status; + CHECK(substreams_.valueAt(i) != NULL); + + status = substreams_.valueAt(i)->getStatus(); + if (OK != status) { + ALOGE("Substream index %d has encountered an unrecoverable" + " error (%d). Signalling application level and shutting" + " down.", i, status); + sendEvent(MEDIA_ERROR); + goto bailout; + } + } + } + +bailout: + cleanupSocket(); + return false; +} + +bool AAH_RXPlayer::processRX(PacketBuffer* pb) { + CHECK(NULL != pb); + + uint8_t* data = pb->data_; + ssize_t amt = pb->length_; + uint32_t nak_magic; + uint16_t seq_no; + uint32_t epoch; + + // Every packet either starts with an RTP header which is at least 12 bytes + // long or is a retry NAK which is 14 bytes long. If there are fewer than + // 12 bytes here, this cannot be a proper RTP packet. + if (amt < 12) { + ALOGV("Dropping packet, too short to contain RTP header (%u bytes)", + static_cast<uint32_t>(amt)); + goto drop_packet; + } + + // Check to see if this is the special case of a NAK packet. + nak_magic = ntohl(*(reinterpret_cast<uint32_t*>(data))); + if (nak_magic == kRetransNAKMagic) { + // Looks like a NAK packet; make sure its long enough. 
+ + if (amt < static_cast<ssize_t>(sizeof(RetransRequest))) { + ALOGV("Dropping packet, too short to contain NAK payload" + " (%u bytes)", static_cast<uint32_t>(amt)); + goto drop_packet; + } + + SeqNoGap gap; + RetransRequest* rtr = reinterpret_cast<RetransRequest*>(data); + gap.start_seq_ = ntohs(rtr->start_seq_); + gap.end_seq_ = ntohs(rtr->end_seq_); + + ALOGV("Process NAK for gap at [%hu, %hu]", + gap.start_seq_, gap.end_seq_); + ring_buffer_.processNAK(&gap); + + return true; + } + + // According to the TRTP spec, version should be 2, padding should be 0, + // extension should be 0 and CSRCCnt should be 0. If any of these tests + // fail, we chuck the packet. + if (data[0] != 0x80) { + ALOGV("Dropping packet, bad V/P/X/CSRCCnt field (0x%02x)", + data[0]); + goto drop_packet; + } + + // Check the payload type. For TRTP, it should always be 100. + if ((data[1] & 0x7F) != 100) { + ALOGV("Dropping packet, bad payload type. (%u)", + data[1] & 0x7F); + goto drop_packet; + } + + // Check whether the transmitter has begun a new epoch. + epoch = (U32_AT(data + 8) >> 10) & 0x3FFFFF; + if (current_epoch_known_) { + if (epoch != current_epoch_) { + ALOGV("%s: new epoch %u", __PRETTY_FUNCTION__, epoch); + current_epoch_ = epoch; + resetPipeline(); + } + } else { + current_epoch_ = epoch; + current_epoch_known_ = true; + } + + // Extract the sequence number and hand the packet off to the ring buffer + // for dropped packet detection and later processing. + seq_no = U16_AT(data + 2); + return ring_buffer_.pushBuffer(pb, seq_no); + +drop_packet: + PacketBuffer::destroy(pb); + return true; +} + +void AAH_RXPlayer::processRingBuffer() { + PacketBuffer* pb; + bool is_discon; + sp<Substream> substream; + LinearTransform trans; + bool foundTrans = false; + + while (NULL != (pb = ring_buffer_.fetchBuffer(&is_discon))) { + if (is_discon) { + // Abort all partially assembled payloads. 
+ for (size_t i = 0; i < substreams_.size(); ++i) { + CHECK(substreams_.valueAt(i) != NULL); + substreams_.valueAt(i)->cleanupBufferInProgress(); + } + } + + uint8_t* data = pb->data_; + ssize_t amt = pb->length_; + + // Should not have any non-RTP packets in the ring buffer. RTP packets + // must be at least 12 bytes long. + CHECK(amt >= 12); + + // Extract the marker bit and the SSRC field. + bool marker = (data[1] & 0x80) != 0; + uint32_t ssrc = U32_AT(data + 8); + + // Is this the start of a new TRTP payload? If so, the marker bit + // should be set and there are some things we should be checking for. + if (marker) { + // TRTP headers need to have at least a byte for version, a byte for + // payload type and flags, and 4 bytes for length. + if (amt < 18) { + ALOGV("Dropping packet, too short to contain TRTP header" + " (%u bytes)", static_cast<uint32_t>(amt)); + goto process_next_packet; + } + + // Check the TRTP version and extract the payload type/flags. + uint8_t trtp_version = data[12]; + uint8_t payload_type = (data[13] >> 4) & 0xF; + uint8_t trtp_flags = data[13] & 0xF; + + if (1 != trtp_version) { + ALOGV("Dropping packet, bad trtp version %hhu", trtp_version); + goto process_next_packet; + } + + // Is there a timestamp transformation present on this packet? If + // so, extract it and pass it to the appropriate substreams. + if (trtp_flags & 0x02) { + ssize_t offset = 18 + ((trtp_flags & 0x01) ? 
4 : 0); + if (amt < (offset + 24)) { + ALOGV("Dropping packet, too short to contain TRTP Timestamp" + " Transformation (%u bytes)", + static_cast<uint32_t>(amt)); + goto process_next_packet; + } + + trans.a_zero = fetchInt64(data + offset); + trans.b_zero = fetchInt64(data + offset + 16); + trans.a_to_b_numer = static_cast<int32_t>( + fetchInt32 (data + offset + 8)); + trans.a_to_b_denom = U32_AT(data + offset + 12); + foundTrans = true; + + uint32_t program_id = (ssrc >> 5) & 0x1F; + for (size_t i = 0; i < substreams_.size(); ++i) { + sp<Substream> iter = substreams_.valueAt(i); + CHECK(iter != NULL); + + if (iter->getProgramID() == program_id) { + iter->processTSTransform(trans); + } + } + } + + // Is this a command packet? If so, its not necessarily associate + // with one particular substream. Just give it to the command + // packet handler and then move on. + if (4 == payload_type) { + processCommandPacket(pb); + goto process_next_packet; + } + } + + // If we got to here, then we are a normal packet. Find (or allocate) + // the substream we belong to and send the packet off to be processed. + substream = substreams_.valueFor(ssrc); + if (substream == NULL) { + substream = new Substream(ssrc, omx_); + if (substream == NULL) { + ALOGE("Failed to allocate substream for SSRC 0x%08x", ssrc); + goto process_next_packet; + } + substreams_.add(ssrc, substream); + + if (foundTrans) { + substream->processTSTransform(trans); + } + } + + CHECK(substream != NULL); + + if (marker) { + // Start of a new TRTP payload for this substream. Extract the + // lower 32 bits of the timestamp and hand the buffer to the + // substream for processing. + uint32_t ts_lower = U32_AT(data + 4); + substream->processPayloadStart(data + 12, amt - 12, ts_lower); + } else { + // Continuation of an existing TRTP payload. Just hand it off to + // the substream for processing. 
+ substream->processPayloadCont(data + 12, amt - 12); + } + +process_next_packet: + PacketBuffer::destroy(pb); + } // end of main processing while loop. +} + +void AAH_RXPlayer::processCommandPacket(PacketBuffer* pb) { + CHECK(NULL != pb); + + uint8_t* data = pb->data_; + ssize_t amt = pb->length_; + + // verify that this packet meets the minimum length of a command packet + if (amt < 20) { + return; + } + + uint8_t trtp_version = data[12]; + uint8_t trtp_flags = data[13] & 0xF; + + if (1 != trtp_version) { + ALOGV("Dropping packet, bad trtp version %hhu", trtp_version); + return; + } + + // calculate the start of the command payload + ssize_t offset = 18; + if (trtp_flags & 0x01) { + // timestamp is present (4 bytes) + offset += 4; + } + if (trtp_flags & 0x02) { + // transform is present (24 bytes) + offset += 24; + } + + // the packet must contain 2 bytes of command payload beyond the TRTP header + if (amt < offset + 2) { + return; + } + + uint16_t command_id = U16_AT(data + offset); + + switch (command_id) { + case TRTPControlPacket::kCommandNop: + break; + + case TRTPControlPacket::kCommandEOS: + case TRTPControlPacket::kCommandFlush: { + uint16_t program_id = (U32_AT(data + 8) >> 5) & 0x1F; + ALOGI("*** %s flushing program_id=%d", + __PRETTY_FUNCTION__, program_id); + + Vector<uint32_t> substreams_to_remove; + for (size_t i = 0; i < substreams_.size(); ++i) { + sp<Substream> iter = substreams_.valueAt(i); + if (iter->getProgramID() == program_id) { + iter->shutdown(); + substreams_to_remove.add(iter->getSSRC()); + } + } + + for (size_t i = 0; i < substreams_to_remove.size(); ++i) { + substreams_.removeItem(substreams_to_remove[i]); + } + } break; + } +} + +bool AAH_RXPlayer::processGaps() { + // Deal with the current gap situation. Specifically... + // + // 1) If a new gap has shown up, send a retransmit request to the + // transmitter. 
+ // 2) If a gap we were working on has had a packet in the middle or at
+ // the end filled in, send another retransmit request for the beginning
+ // portion of the gap. TRTP was designed for LANs where packet
+ // re-ordering is very unlikely; so seeing the middle or end of a gap
+ // filled in before the beginning is an almost certain indication that
+ // a retransmission packet was also dropped.
+ // 3) If we have been working on a gap for a while and it still has not
+ // been filled in, send another retransmit request.
+ // 4) If there are no more gaps in the ring, clear the current_gap_status_
+ // flag to indicate that all is well again.
+
+ // Start by fetching the active gap status.
+ SeqNoGap gap;
+ bool send_retransmit_request = false;
+ bool ret_val = false;
+ GapStatus gap_status;
+ if (kGS_NoGap != (gap_status = ring_buffer_.fetchCurrentGap(&gap))) {
+ // Note: checking for a change in the end sequence number should cover
+ // moving on to an entirely new gap for case #1 as well as resending the
+ // beginning of a gap range for case #2.
+ send_retransmit_request = (kGS_NoGap == current_gap_status_) ||
+ (current_gap_.end_seq_ != gap.end_seq_);
+
+ // If this is the same gap we have been working on, and it has timed
+ // out, then check to see if our substreams are about to underflow. If
+ // so, instead of sending another retransmit request, just give up on
+ // this gap and move on.
+ if (!send_retransmit_request &&
+ (kGS_NoGap != current_gap_status_) &&
+ (0 == computeNextGapRetransmitTimeout())) {
+
+ // If our current gap is the fast-start gap, don't bother to skip it
+ // because substreams look like they are about to underflow.
+ if ((kGS_FastStartGap != gap_status) || + (current_gap_.end_seq_ != gap.end_seq_)) { + for (size_t i = 0; i < substreams_.size(); ++i) { + if (substreams_.valueAt(i)->isAboutToUnderflow()) { + ALOGV("About to underflow, giving up on gap [%hu, %hu]", + gap.start_seq_, gap.end_seq_); + ring_buffer_.processNAK(); + current_gap_status_ = kGS_NoGap; + return true; + } + } + } + + // Looks like no one is about to underflow. Just go ahead and send + // the request. + send_retransmit_request = true; + } + } else { + current_gap_status_ = kGS_NoGap; + } + + if (send_retransmit_request) { + // If we have been working on a fast start, and it is still not filled + // in, even after the extended retransmit time out, give up and skip it. + // The system should fall back into its normal slow-start behavior. + if ((kGS_FastStartGap == current_gap_status_) && + (current_gap_.end_seq_ == gap.end_seq_)) { + ALOGV("Fast start is taking forever; giving up."); + ring_buffer_.processNAK(); + current_gap_status_ = kGS_NoGap; + return true; + } + + // Send the request. + RetransRequest req; + uint32_t magic = (kGS_FastStartGap == gap_status) + ? kFastStartRequestMagic + : kRetransRequestMagic; + req.magic_ = htonl(magic); + req.mcast_ip_ = listen_addr_.sin_addr.s_addr; + req.mcast_port_ = listen_addr_.sin_port; + req.start_seq_ = htons(gap.start_seq_); + req.end_seq_ = htons(gap.end_seq_); + + { + uint32_t a = ntohl(transmitter_addr_.sin_addr.s_addr); + uint16_t p = ntohs(transmitter_addr_.sin_port); + ALOGV("Sending to transmitter %u.%u.%u.%u:%hu", + ((a >> 24) & 0xFF), + ((a >> 16) & 0xFF), + ((a >> 8) & 0xFF), + ( a & 0xFF), + p); + } + + int res = sendto(sock_fd_, &req, sizeof(req), 0, + reinterpret_cast<struct sockaddr*>(&transmitter_addr_), + sizeof(transmitter_addr_)); + if (res < 0) { + ALOGE("Error when sending retransmit request (%d)", errno); + } else { + ALOGV("%s request for range [%hu, %hu] sent", + (kGS_FastStartGap == gap_status) ? 
"Fast Start" + : "Retransmit", + gap.start_seq_, gap.end_seq_); + } + + // Update the current gap info. + current_gap_ = gap; + current_gap_status_ = gap_status; + next_retrans_req_time_ = monotonicUSecNow() + + ((kGS_FastStartGap == current_gap_status_) + ? kFastStartTimeoutUSec + : kGapRerequestTimeoutUSec); + } + + return false; +} + +// Compute when its time to send the next gap retransmission in milliseconds. +// Returns < 0 for an infinite timeout (no gap) and 0 if its time to retransmit +// right now. +int AAH_RXPlayer::computeNextGapRetransmitTimeout() { + if (kGS_NoGap == current_gap_status_) { + return -1; + } + + int64_t timeout_delta = next_retrans_req_time_ - monotonicUSecNow(); + + timeout_delta /= 1000; + if (timeout_delta <= 0) { + return 0; + } + + return static_cast<uint32_t>(timeout_delta); +} + +} // namespace android diff --git a/media/libaah_rtp/aah_rx_player_ring_buffer.cpp b/media/libaah_rtp/aah_rx_player_ring_buffer.cpp new file mode 100644 index 0000000..779405e --- /dev/null +++ b/media/libaah_rtp/aah_rx_player_ring_buffer.cpp @@ -0,0 +1,366 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "LibAAH_RTP" +//#define LOG_NDEBUG 0 +#include <utils/Log.h> + +#include "aah_rx_player.h" + +namespace android { + +AAH_RXPlayer::RXRingBuffer::RXRingBuffer(uint32_t capacity) { + capacity_ = capacity; + rd_ = wr_ = 0; + ring_ = new PacketBuffer*[capacity]; + memset(ring_, 0, sizeof(PacketBuffer*) * capacity); + reset(); +} + +AAH_RXPlayer::RXRingBuffer::~RXRingBuffer() { + reset(); + delete[] ring_; +} + +void AAH_RXPlayer::RXRingBuffer::reset() { + AutoMutex lock(&lock_); + + if (NULL != ring_) { + while (rd_ != wr_) { + CHECK(rd_ < capacity_); + if (NULL != ring_[rd_]) { + PacketBuffer::destroy(ring_[rd_]); + ring_[rd_] = NULL; + } + rd_ = (rd_ + 1) % capacity_; + } + } + + rd_ = wr_ = 0; + rd_seq_known_ = false; + waiting_for_fast_start_ = true; + fetched_first_packet_ = false; + rtp_activity_timeout_valid_ = false; +} + +bool AAH_RXPlayer::RXRingBuffer::pushBuffer(PacketBuffer* buf, + uint16_t seq) { + AutoMutex lock(&lock_); + CHECK(NULL != ring_); + CHECK(NULL != buf); + + rtp_activity_timeout_valid_ = true; + rtp_activity_timeout_ = monotonicUSecNow() + kRTPActivityTimeoutUSec; + + // If the ring buffer is totally reset (we have never received a single + // payload) then we don't know the rd sequence number and this should be + // simple. We just store the payload, advance the wr pointer and record the + // initial sequence number. + if (!rd_seq_known_) { + CHECK(rd_ == wr_); + CHECK(NULL == ring_[wr_]); + CHECK(wr_ < capacity_); + + ring_[wr_] = buf; + wr_ = (wr_ + 1) % capacity_; + rd_seq_ = seq; + rd_seq_known_ = true; + return true; + } + + // Compute the seqence number of this payload and of the write pointer, + // normalized around the read pointer. IOW - transform the payload seq no + // and the wr pointer seq no into a space where the rd pointer seq no is + // zero. This will define 4 cases we can consider... + // + // 1) norm_seq == norm_wr_seq + // This payload is contiguous with the last. All is good. 
+ // + // 2) ((norm_seq < norm_wr_seq) && (norm_seq >= norm_rd_seq) + // aka ((norm_seq < norm_wr_seq) && (norm_seq >= 0) + // This payload is in the past, in the unprocessed region of the ring + // buffer. It is probably a retransmit intended to fill in a dropped + // payload; it may be a duplicate. + // + // 3) ((norm_seq - norm_wr_seq) & 0x8000) != 0 + // This payload is in the past compared to the write pointer (or so very + // far in the future that it has wrapped the seq no space), but not in + // the unprocessed region of the ring buffer. This could be a duplicate + // retransmit; we just drop these payloads unless we are waiting for our + // first fast start packet. If we are waiting for fast start, than this + // packet is probably the first packet of the fast start retransmission. + // If it will fit in the buffer, back up the read pointer to its position + // and clear the fast start flag, otherwise just drop it. + // + // 4) ((norm_seq - norm_wr_seq) & 0x8000) == 0 + // This payload which is ahead of the next write pointer. This indicates + // that we have missed some payloads and need to request a retransmit. + // If norm_seq >= (capacity - 1), then the gap is so large that it would + // overflow the ring buffer and we should probably start to panic. + + uint16_t norm_wr_seq = ((wr_ + capacity_ - rd_) % capacity_); + uint16_t norm_seq = seq - rd_seq_; + + // Check for overflow first. 
+ if ((!(norm_seq & 0x8000)) && (norm_seq >= (capacity_ - 1))) { + ALOGW("Ring buffer overflow; cap = %u, [rd, wr] = [%hu, %hu]," + " seq = %hu", capacity_, rd_seq_, norm_wr_seq + rd_seq_, seq); + PacketBuffer::destroy(buf); + return false; + } + + // Check for case #1 + if (norm_seq == norm_wr_seq) { + CHECK(wr_ < capacity_); + CHECK(NULL == ring_[wr_]); + + ring_[wr_] = buf; + wr_ = (wr_ + 1) % capacity_; + + CHECK(wr_ != rd_); + return true; + } + + // Check case #2 + uint32_t ring_pos = (rd_ + norm_seq) % capacity_; + if ((norm_seq < norm_wr_seq) && (!(norm_seq & 0x8000))) { + // Do we already have a payload for this slot? If so, then this looks + // like a duplicate retransmit. Just ignore it. + if (NULL != ring_[ring_pos]) { + ALOGD("RXed duplicate retransmit, seq = %hu", seq); + PacketBuffer::destroy(buf); + } else { + // Looks like we were missing this payload. Go ahead and store it. + ring_[ring_pos] = buf; + } + + return true; + } + + // Check case #3 + if ((norm_seq - norm_wr_seq) & 0x8000) { + if (!waiting_for_fast_start_) { + ALOGD("RXed duplicate retransmit from before rd pointer, seq = %hu", + seq); + PacketBuffer::destroy(buf); + } else { + // Looks like a fast start fill-in. Go ahead and store it, assuming + // that we can fit it in the buffer. + uint32_t implied_ring_size = static_cast<uint32_t>(norm_wr_seq) + + (rd_seq_ - seq); + + if (implied_ring_size >= (capacity_ - 1)) { + ALOGD("RXed what looks like a fast start packet (seq = %hu)," + " but packet is too far in the past to fit into the ring" + " buffer. Dropping.", seq); + PacketBuffer::destroy(buf); + } else { + ring_pos = (rd_ + capacity_ + seq - rd_seq_) % capacity_; + rd_seq_ = seq; + rd_ = ring_pos; + waiting_for_fast_start_ = false; + + CHECK(ring_pos < capacity_); + CHECK(NULL == ring_[ring_pos]); + ring_[ring_pos] = buf; + } + + } + return true; + } + + // Must be in case #4 with no overflow. This packet fits in the current + // ring buffer, but is discontiuguous. 
Advance the write pointer leaving a + // gap behind. + uint32_t gap_len = (ring_pos + capacity_ - wr_) % capacity_; + ALOGD("Drop detected; %u packets, seq_range [%hu, %hu]", + gap_len, + rd_seq_ + norm_wr_seq, + rd_seq_ + norm_wr_seq + gap_len - 1); + + CHECK(NULL == ring_[ring_pos]); + ring_[ring_pos] = buf; + wr_ = (ring_pos + 1) % capacity_; + CHECK(wr_ != rd_); + + return true; +} + +AAH_RXPlayer::PacketBuffer* +AAH_RXPlayer::RXRingBuffer::fetchBuffer(bool* is_discon) { + AutoMutex lock(&lock_); + CHECK(NULL != ring_); + CHECK(NULL != is_discon); + + // If the read seqence number is not known, then this ring buffer has not + // received a packet since being reset and there cannot be any packets to + // return. If we are still waiting for the first fast start packet to show + // up, we don't want to let any buffer be consumed yet because we expect to + // see a packet before the initial read sequence number show up shortly. + if (!rd_seq_known_ || waiting_for_fast_start_) { + *is_discon = false; + return NULL; + } + + PacketBuffer* ret = NULL; + *is_discon = !fetched_first_packet_; + + while ((rd_ != wr_) && (NULL == ret)) { + CHECK(rd_ < capacity_); + + // If we hit a gap, stall and do not advance the read pointer. Let the + // higher level code deal with requesting retries and/or deciding to + // skip the current gap. + ret = ring_[rd_]; + if (NULL == ret) { + break; + } + + ring_[rd_] = NULL; + rd_ = (rd_ + 1) % capacity_; + ++rd_seq_; + } + + if (NULL != ret) { + fetched_first_packet_ = true; + } + + return ret; +} + +AAH_RXPlayer::GapStatus +AAH_RXPlayer::RXRingBuffer::fetchCurrentGap(SeqNoGap* gap) { + AutoMutex lock(&lock_); + CHECK(NULL != ring_); + CHECK(NULL != gap); + + // If the read seqence number is not known, then this ring buffer has not + // received a packet since being reset and there cannot be any gaps. 
+ if (!rd_seq_known_) { + return kGS_NoGap; + } + + // If we are waiting for fast start, then the current gap is a fast start + // gap and it includes all packets before the read sequence number. + if (waiting_for_fast_start_) { + gap->start_seq_ = + gap->end_seq_ = rd_seq_ - 1; + return kGS_FastStartGap; + } + + // If rd == wr, then the buffer is empty and there cannot be any gaps. + if (rd_ == wr_) { + return kGS_NoGap; + } + + // If rd_ is currently pointing at an unprocessed packet, then there is no + // current gap. + CHECK(rd_ < capacity_); + if (NULL != ring_[rd_]) { + return kGS_NoGap; + } + + // Looks like there must be a gap here. The start of the gap is the current + // rd sequence number, all we need to do now is determine its length in + // order to compute the end sequence number. + gap->start_seq_ = rd_seq_; + uint16_t end = rd_seq_; + uint32_t tmp = (rd_ + 1) % capacity_; + while ((tmp != wr_) && (NULL == ring_[tmp])) { + ++end; + tmp = (tmp + 1) % capacity_; + } + gap->end_seq_ = end; + + return kGS_NormalGap; +} + +void AAH_RXPlayer::RXRingBuffer::processNAK(const SeqNoGap* nak) { + AutoMutex lock(&lock_); + CHECK(NULL != ring_); + + // If we were waiting for our first fast start fill-in packet, and we + // received a NAK, then apparantly we are not getting our fast start. Just + // clear the waiting flag and go back to normal behavior. + if (waiting_for_fast_start_) { + waiting_for_fast_start_ = false; + } + + // If we have not received a packet since last reset, or there is no data in + // the ring, then there is nothing to skip. + if ((!rd_seq_known_) || (rd_ == wr_)) { + return; + } + + // If rd_ is currently pointing at an unprocessed packet, then there is no + // gap to skip. + CHECK(rd_ < capacity_); + if (NULL != ring_[rd_]) { + return; + } + + // Looks like there must be a gap here. Advance rd until we have passed + // over the portion of it indicated by nak (or all of the gap if nak is + // NULL). 
Then reset fetched_first_packet_ so that the next read will show + // up as being discontiguous. + uint16_t seq_after_gap = (NULL == nak) ? 0 : nak->end_seq_ + 1; + while ((rd_ != wr_) && + (NULL == ring_[rd_]) && + ((NULL == nak) || (seq_after_gap != rd_seq_))) { + rd_ = (rd_ + 1) % capacity_; + ++rd_seq_; + } + fetched_first_packet_ = false; +} + +int AAH_RXPlayer::RXRingBuffer::computeInactivityTimeout() { + AutoMutex lock(&lock_); + + if (!rtp_activity_timeout_valid_) { + return -1; + } + + uint64_t now = monotonicUSecNow(); + if (rtp_activity_timeout_ <= now) { + return 0; + } + + return (rtp_activity_timeout_ - now) / 1000; +} + +AAH_RXPlayer::PacketBuffer* +AAH_RXPlayer::PacketBuffer::allocate(ssize_t length) { + if (length <= 0) { + return NULL; + } + + uint32_t alloc_len = sizeof(PacketBuffer) + length; + PacketBuffer* ret = reinterpret_cast<PacketBuffer*>( + new uint8_t[alloc_len]); + + if (NULL != ret) { + ret->length_ = length; + } + + return ret; +} + +void AAH_RXPlayer::PacketBuffer::destroy(PacketBuffer* pb) { + uint8_t* kill_me = reinterpret_cast<uint8_t*>(pb); + delete[] kill_me; +} + +} // namespace android diff --git a/media/libaah_rtp/aah_rx_player_substream.cpp b/media/libaah_rtp/aah_rx_player_substream.cpp new file mode 100644 index 0000000..18b0e2b --- /dev/null +++ b/media/libaah_rtp/aah_rx_player_substream.cpp @@ -0,0 +1,677 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "LibAAH_RTP" +//#define LOG_NDEBUG 0 + +#include <utils/Log.h> + +#include <include/avc_utils.h> +#include <media/stagefright/MediaBuffer.h> +#include <media/stagefright/MediaDefs.h> +#include <media/stagefright/MetaData.h> +#include <media/stagefright/OMXCodec.h> +#include <media/stagefright/Utils.h> + +#include "aah_rx_player.h" +#include "aah_tx_packet.h" + +inline uint32_t min(uint32_t a, uint32_t b) { + return (a < b ? a : b); +} + +namespace android { + +int64_t AAH_RXPlayer::Substream::kAboutToUnderflowThreshold = + 50ull * 1000; + +AAH_RXPlayer::Substream::Substream(uint32_t ssrc, OMXClient& omx) { + ssrc_ = ssrc; + substream_details_known_ = false; + buffer_in_progress_ = NULL; + status_ = OK; + codec_mime_type_ = ""; + + decoder_ = new AAH_DecoderPump(omx); + if (decoder_ == NULL) { + ALOGE("%s failed to allocate decoder pump!", __PRETTY_FUNCTION__); + } + if (OK != decoder_->initCheck()) { + ALOGE("%s failed to initialize decoder pump!", __PRETTY_FUNCTION__); + } + + // cleanupBufferInProgress will reset most of the internal state variables. + // Just need to make sure that buffer_in_progress_ is NULL before calling. + cleanupBufferInProgress(); +} + +AAH_RXPlayer::Substream::~Substream() { + shutdown(); +} + +void AAH_RXPlayer::Substream::shutdown() { + substream_meta_ = NULL; + status_ = OK; + cleanupBufferInProgress(); + cleanupDecoder(); +} + +void AAH_RXPlayer::Substream::cleanupBufferInProgress() { + if (NULL != buffer_in_progress_) { + buffer_in_progress_->release(); + buffer_in_progress_ = NULL; + } + + expected_buffer_size_ = 0; + buffer_filled_ = 0; + waiting_for_rap_ = true; + + aux_data_in_progress_.clear(); + aux_data_expected_size_ = 0; +} + +void AAH_RXPlayer::Substream::cleanupDecoder() { + if (decoder_ != NULL) { + decoder_->shutdown(); + } +} + +bool AAH_RXPlayer::Substream::shouldAbort(const char* log_tag) { + // If we have already encountered a fatal error, do nothing. 
We are just + // waiting for our owner to shut us down now. + if (OK != status_) { + ALOGV("Skipping %s, substream has encountered fatal error (%d).", + log_tag, status_); + return true; + } + + return false; +} + +void AAH_RXPlayer::Substream::processPayloadStart(uint8_t* buf, + uint32_t amt, + int32_t ts_lower) { + uint32_t min_length = 6; + + if (shouldAbort(__PRETTY_FUNCTION__)) { + return; + } + + // Do we have a buffer in progress already? If so, abort the buffer. In + // theory, this should never happen. If there were a discontinutity in the + // stream, the discon in the seq_nos at the RTP level should have already + // triggered a cleanup of the buffer in progress. To see a problem at this + // level is an indication either of a bug in the transmitter, or some form + // of terrible corruption/tampering on the wire. + if (NULL != buffer_in_progress_) { + ALOGE("processPayloadStart is aborting payload already in progress."); + cleanupBufferInProgress(); + } + + // Parse enough of the header to know where we stand. Since this is a + // payload start, it should begin with a TRTP header which has to be at + // least 6 bytes long. + if (amt < min_length) { + ALOGV("Discarding payload too short to contain TRTP header (len = %u)", + amt); + return; + } + + // Check the TRTP version number. + if (0x01 != buf[0]) { + ALOGV("Unexpected TRTP version (%u) in header. Expected %u.", + buf[0], 1); + return; + } + + // Extract the substream type field and make sure its one we understand (and + // one that does not conflict with any previously received substream type. + uint8_t header_type = (buf[1] >> 4) & 0xF; + switch (header_type) { + case TRTPPacket::kHeaderTypeAudio: + // Audio, yay! Just break. We understand audio payloads. 
+ break; + case TRTPPacket::kHeaderTypeVideo: + ALOGV("RXed packet with unhandled TRTP header type (Video)."); + return; + case TRTPPacket::kHeaderTypeSubpicture: + ALOGV("RXed packet with unhandled TRTP header type (Subpicture)."); + return; + case TRTPPacket::kHeaderTypeControl: + ALOGV("RXed packet with unhandled TRTP header type (Control)."); + return; + default: + ALOGV("RXed packet with unhandled TRTP header type (%u).", + header_type); + return; + } + + if (substream_details_known_ && (header_type != substream_type_)) { + ALOGV("RXed TRTP Payload for SSRC=0x%08x where header type (%u) does" + " not match previously received header type (%u)", + ssrc_, header_type, substream_type_); + return; + } + + // Check the flags to see if there is another 32 bits of timestamp present. + uint32_t trtp_header_len = 6; + bool ts_valid = buf[1] & TRTPPacket::kFlag_TSValid; + if (ts_valid) { + min_length += 4; + trtp_header_len += 4; + if (amt < min_length) { + ALOGV("Discarding payload too short to contain TRTP timestamp" + " (len = %u)", amt); + return; + } + } + + // Extract the TRTP length field and sanity check it. + uint32_t trtp_len = U32_AT(buf + 2); + if (trtp_len < min_length) { + ALOGV("TRTP length (%u) is too short to be valid. Must be at least %u" + " bytes.", trtp_len, min_length); + return; + } + + // Extract the rest of the timestamp field if valid. + int64_t ts = 0; + uint32_t parse_offset = 6; + if (ts_valid) { + uint32_t ts_upper = U32_AT(buf + parse_offset); + parse_offset += 4; + ts = (static_cast<int64_t>(ts_upper) << 32) | ts_lower; + } + + // Check the flags to see if there is another 24 bytes of timestamp + // transformation present. 
+ if (buf[1] & TRTPPacket::kFlag_TSTransformPresent) { + min_length += 24; + parse_offset += 24; + trtp_header_len += 24; + if (amt < min_length) { + ALOGV("Discarding payload too short to contain TRTP timestamp" + " transformation (len = %u)", amt); + return; + } + } + + // TODO : break the parsing into individual parsers for the different + // payload types (audio, video, etc). + // + // At this point in time, we know that this is audio. Go ahead and parse + // the basic header, check the codec type, and find the payload portion of + // the packet. + min_length += 3; + if (trtp_len < min_length) { + ALOGV("TRTP length (%u) is too short to be a valid audio payload. Must" + " be at least %u bytes.", trtp_len, min_length); + return; + } + + if (amt < min_length) { + ALOGV("TRTP porttion of RTP payload (%u bytes) too small to contain" + " entire TRTP header. TRTP does not currently support" + " fragmenting TRTP headers across RTP payloads", amt); + return; + } + + uint8_t codec_type = buf[parse_offset ]; + uint8_t flags = buf[parse_offset + 1]; + uint8_t volume = buf[parse_offset + 2]; + parse_offset += 3; + trtp_header_len += 3; + + if (!setupSubstreamType(header_type, codec_type)) { + return; + } + + if (decoder_ != NULL) { + decoder_->setRenderVolume(volume); + } + + if (waiting_for_rap_ && !(flags & TRTPAudioPacket::kFlag_RandomAccessPoint)) { + ALOGV("Dropping non-RAP TRTP Audio Payload while waiting for RAP."); + return; + } + + // Check for the presence of codec aux data. + if (flags & TRTPAudioPacket::kFlag_AuxLengthPresent) { + min_length += 4; + trtp_header_len += 4; + + if (trtp_len < min_length) { + ALOGV("TRTP length (%u) is too short to be a valid audio payload. " + "Must be at least %u bytes.", trtp_len, min_length); + return; + } + + if (amt < min_length) { + ALOGV("TRTP porttion of RTP payload (%u bytes) too small to contain" + " entire TRTP header. 
TRTP does not currently support" + " fragmenting TRTP headers across RTP payloads", amt); + return; + } + + aux_data_expected_size_ = U32_AT(buf + parse_offset); + aux_data_in_progress_.clear(); + if (aux_data_in_progress_.capacity() < aux_data_expected_size_) { + aux_data_in_progress_.setCapacity(aux_data_expected_size_); + } + } else { + aux_data_expected_size_ = 0; + } + + if ((aux_data_expected_size_ + trtp_header_len) > trtp_len) { + ALOGV("Expected codec aux data length (%u) and TRTP header overhead" + " (%u) too large for total TRTP payload length (%u).", + aux_data_expected_size_, trtp_header_len, trtp_len); + return; + } + + // OK - everything left is just payload. Compute the payload size, start + // the buffer in progress and pack as much payload as we can into it. If + // the payload is finished once we are done, go ahead and send the payload + // to the decoder. + expected_buffer_size_ = trtp_len + - trtp_header_len + - aux_data_expected_size_; + if (!expected_buffer_size_) { + ALOGV("Dropping TRTP Audio Payload with 0 Access Unit length"); + return; + } + + CHECK(amt >= trtp_header_len); + uint32_t todo = amt - trtp_header_len; + if ((expected_buffer_size_ + aux_data_expected_size_) < todo) { + ALOGV("Extra data (%u > %u) present in initial TRTP Audio Payload;" + " dropping payload.", todo, + expected_buffer_size_ + aux_data_expected_size_); + return; + } + + buffer_filled_ = 0; + buffer_in_progress_ = new MediaBuffer(expected_buffer_size_); + if ((NULL == buffer_in_progress_) || + (NULL == buffer_in_progress_->data())) { + ALOGV("Failed to allocate MediaBuffer of length %u", + expected_buffer_size_); + cleanupBufferInProgress(); + return; + } + + sp<MetaData> meta = buffer_in_progress_->meta_data(); + if (meta == NULL) { + ALOGV("Missing metadata structure in allocated MediaBuffer; dropping" + " payload"); + cleanupBufferInProgress(); + return; + } + + meta->setCString(kKeyMIMEType, codec_mime_type_); + if (ts_valid) { + meta->setInt64(kKeyTime, ts); 
+ } + + // Skip over the header we have already extracted. + amt -= trtp_header_len; + buf += trtp_header_len; + + // Extract as much of the expected aux data as we can. + todo = min(aux_data_expected_size_, amt); + if (todo) { + aux_data_in_progress_.appendArray(buf, todo); + buf += todo; + amt -= todo; + } + + // Extract as much of the expected payload as we can. + todo = min(expected_buffer_size_, amt); + if (todo > 0) { + uint8_t* tgt = + reinterpret_cast<uint8_t*>(buffer_in_progress_->data()); + memcpy(tgt, buf, todo); + buffer_filled_ = amt; + buf += todo; + amt -= todo; + } + + if (buffer_filled_ >= expected_buffer_size_) { + processCompletedBuffer(); + } +} + +void AAH_RXPlayer::Substream::processPayloadCont(uint8_t* buf, + uint32_t amt) { + if (shouldAbort(__PRETTY_FUNCTION__)) { + return; + } + + if (NULL == buffer_in_progress_) { + ALOGV("TRTP Receiver skipping payload continuation; no buffer currently" + " in progress."); + return; + } + + CHECK(aux_data_in_progress_.size() <= aux_data_expected_size_); + uint32_t aux_left = aux_data_expected_size_ - aux_data_in_progress_.size(); + if (aux_left) { + uint32_t todo = min(aux_left, amt); + aux_data_in_progress_.appendArray(buf, todo); + amt -= todo; + buf += todo; + + if (!amt) + return; + } + + CHECK(buffer_filled_ < expected_buffer_size_); + uint32_t buffer_left = expected_buffer_size_ - buffer_filled_; + if (amt > buffer_left) { + ALOGV("Extra data (%u > %u) present in continued TRTP Audio Payload;" + " dropping payload.", amt, buffer_left); + cleanupBufferInProgress(); + return; + } + + if (amt > 0) { + uint8_t* tgt = + reinterpret_cast<uint8_t*>(buffer_in_progress_->data()); + memcpy(tgt + buffer_filled_, buf, amt); + buffer_filled_ += amt; + } + + if (buffer_filled_ >= expected_buffer_size_) { + processCompletedBuffer(); + } +} + +void AAH_RXPlayer::Substream::processCompletedBuffer() { + status_t res; + + CHECK(NULL != buffer_in_progress_); + + if (decoder_ == NULL) { + ALOGV("Dropping complete 
buffer, no decoder pump allocated"); + goto bailout; + } + + // Make sure our metadata used to initialize the decoder has been properly + // set up. + if (!setupSubstreamMeta()) + goto bailout; + + // If our decoder has not be set up, do so now. + res = decoder_->init(substream_meta_); + if (OK != res) { + ALOGE("Failed to init decoder (res = %d)", res); + cleanupDecoder(); + substream_meta_ = NULL; + goto bailout; + } + + // Queue the payload for decode. + res = decoder_->queueForDecode(buffer_in_progress_); + + if (res != OK) { + ALOGD("Failed to queue payload for decode, resetting decoder pump!" + " (res = %d)", res); + status_ = res; + cleanupDecoder(); + cleanupBufferInProgress(); + } + + // NULL out buffer_in_progress before calling the cleanup helper. + // + // MediaBuffers use something of a hybrid ref-counting pattern which prevent + // the AAH_DecoderPump's input queue from adding their own reference to the + // MediaBuffer. MediaBuffers start life with a reference count of 0, as + // well as an observer which starts as NULL. Before being given an + // observer, the ref count cannot be allowed to become non-zero as it will + // cause calls to release() to assert. Basically, before a MediaBuffer has + // an observer, they behave like non-ref counted obects where release() + // serves the roll of delete. After a MediaBuffer has an observer, they + // become more like ref counted objects where add ref and release can be + // used, and when the ref count hits zero, the MediaBuffer is handed off to + // the observer. + // + // Given all of this, when we give the buffer to the decoder pump to wait in + // the to-be-processed queue, the decoder cannot add a ref to the buffer as + // it would in a traditional ref counting system. Instead it needs to + // "steal" the non-existent ref. In the case of queue failure, we need to + // make certain to release this non-existent reference so that the buffer is + // cleaned up during the cleanupBufferInProgress helper. 
In the case of a + // successful queue operation, we need to make certain that the + // cleanupBufferInProgress helper does not release the buffer since it needs + // to remain alive in the queue. We acomplish this by NULLing out the + // buffer pointer before calling the cleanup helper. + buffer_in_progress_ = NULL; + +bailout: + cleanupBufferInProgress(); +} + +bool AAH_RXPlayer::Substream::setupSubstreamMeta() { + switch (codec_type_) { + case TRTPAudioPacket::kCodecMPEG1Audio: + codec_mime_type_ = MEDIA_MIMETYPE_AUDIO_MPEG; + return setupMP3SubstreamMeta(); + + case TRTPAudioPacket::kCodecAACAudio: + codec_mime_type_ = MEDIA_MIMETYPE_AUDIO_AAC; + return setupAACSubstreamMeta(); + + default: + ALOGV("Failed to setup substream metadata for unsupported codec" + " type (%u)", codec_type_); + break; + } + + return false; +} + +bool AAH_RXPlayer::Substream::setupMP3SubstreamMeta() { + const uint8_t* buffer_data = NULL; + int sample_rate; + int channel_count; + size_t frame_size; + status_t res; + + buffer_data = reinterpret_cast<const uint8_t*>(buffer_in_progress_->data()); + if (buffer_in_progress_->size() < 4) { + ALOGV("MP3 payload too short to contain header, dropping payload."); + return false; + } + + // Extract the channel count and the sample rate from the MP3 header. The + // stagefright MP3 requires that these be delivered before decoing can + // begin. + if (!GetMPEGAudioFrameSize(U32_AT(buffer_data), + &frame_size, + &sample_rate, + &channel_count, + NULL, + NULL)) { + ALOGV("Failed to parse MP3 header in payload, droping payload."); + return false; + } + + + // Make sure that our substream metadata is set up properly. If there has + // been a format change, be sure to reset the underlying decoder. In + // stagefright, it seems like the only way to do this is to destroy and + // recreate the decoder. 
+ if (substream_meta_ == NULL) { + substream_meta_ = new MetaData(); + + if (substream_meta_ == NULL) { + ALOGE("Failed to allocate MetaData structure for MP3 substream"); + return false; + } + + substream_meta_->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MPEG); + substream_meta_->setInt32 (kKeyChannelCount, channel_count); + substream_meta_->setInt32 (kKeySampleRate, sample_rate); + } else { + int32_t prev_sample_rate; + int32_t prev_channel_count; + substream_meta_->findInt32(kKeySampleRate, &prev_sample_rate); + substream_meta_->findInt32(kKeyChannelCount, &prev_channel_count); + + if ((prev_channel_count != channel_count) || + (prev_sample_rate != sample_rate)) { + ALOGW("MP3 format change detected, forcing decoder reset."); + cleanupDecoder(); + + substream_meta_->setInt32(kKeyChannelCount, channel_count); + substream_meta_->setInt32(kKeySampleRate, sample_rate); + } + } + + return true; +} + +bool AAH_RXPlayer::Substream::setupAACSubstreamMeta() { + int32_t sample_rate, channel_cnt; + static const size_t overhead = sizeof(sample_rate) + + sizeof(channel_cnt); + + if (aux_data_in_progress_.size() < overhead) { + ALOGE("Not enough aux data (%u) to initialize AAC substream decoder", + aux_data_in_progress_.size()); + return false; + } + + const uint8_t* aux_data = aux_data_in_progress_.array(); + size_t aux_data_size = aux_data_in_progress_.size(); + sample_rate = U32_AT(aux_data); + channel_cnt = U32_AT(aux_data + sizeof(sample_rate)); + + const uint8_t* esds_data = NULL; + size_t esds_data_size = 0; + if (aux_data_size > overhead) { + esds_data = aux_data + overhead; + esds_data_size = aux_data_size - overhead; + } + + // Do we already have metadata? If so, has it changed at all? If not, then + // there should be nothing else to do. Otherwise, release our old stream + // metadata and make new metadata. 
+ if (substream_meta_ != NULL) { + uint32_t type; + const void* data; + size_t size; + int32_t prev_sample_rate; + int32_t prev_channel_count; + bool res; + + res = substream_meta_->findInt32(kKeySampleRate, &prev_sample_rate); + CHECK(res); + res = substream_meta_->findInt32(kKeyChannelCount, &prev_channel_count); + CHECK(res); + + // If nothing has changed about the codec aux data (esds, sample rate, + // channel count), then we can just do nothing and get out. Otherwise, + // we will need to reset the decoder and make a new metadata object to + // deal with the format change. + bool hasData = (esds_data != NULL); + bool hadData = substream_meta_->findData(kKeyESDS, &type, &data, &size); + bool esds_change = (hadData != hasData); + + if (!esds_change && hasData) + esds_change = ((size != esds_data_size) || + memcmp(data, esds_data, size)); + + if (!esds_change && + (prev_sample_rate == sample_rate) && + (prev_channel_count == channel_cnt)) { + return true; // no change, just get out. + } + + ALOGW("AAC format change detected, forcing decoder reset."); + cleanupDecoder(); + substream_meta_ = NULL; + } + + CHECK(substream_meta_ == NULL); + + substream_meta_ = new MetaData(); + if (substream_meta_ == NULL) { + ALOGE("Failed to allocate MetaData structure for AAC substream"); + return false; + } + + substream_meta_->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AAC); + substream_meta_->setInt32 (kKeySampleRate, sample_rate); + substream_meta_->setInt32 (kKeyChannelCount, channel_cnt); + + if (esds_data) { + substream_meta_->setData(kKeyESDS, kTypeESDS, + esds_data, esds_data_size); + } + + return true; +} + +void AAH_RXPlayer::Substream::processTSTransform(const LinearTransform& trans) { + if (decoder_ != NULL) { + decoder_->setRenderTSTransform(trans); + } +} + +bool AAH_RXPlayer::Substream::isAboutToUnderflow() { + if (decoder_ == NULL) { + return false; + } + + return decoder_->isAboutToUnderflow(kAboutToUnderflowThreshold); +} + +bool 
AAH_RXPlayer::Substream::setupSubstreamType(uint8_t substream_type, + uint8_t codec_type) { + // Sanity check the codec type. Right now we only support MP3 and AAC. + // Also check for conflicts with previously delivered codec types. + if (substream_details_known_) { + if (codec_type != codec_type_) { + ALOGV("RXed TRTP Payload for SSRC=0x%08x where codec type (%u) does" + " not match previously received codec type (%u)", + ssrc_, codec_type, codec_type_); + return false; + } + + return true; + } + + switch (codec_type) { + // MP3 and AAC are all we support right now. + case TRTPAudioPacket::kCodecMPEG1Audio: + case TRTPAudioPacket::kCodecAACAudio: + break; + + default: + ALOGV("RXed TRTP Audio Payload for SSRC=0x%08x with unsupported" + " codec type (%u)", ssrc_, codec_type); + return false; + } + + substream_type_ = substream_type; + codec_type_ = codec_type; + substream_details_known_ = true; + + return true; +} + +} // namespace android diff --git a/media/libaah_rtp/aah_tx_packet.cpp b/media/libaah_rtp/aah_tx_packet.cpp new file mode 100644 index 0000000..4cd6e47 --- /dev/null +++ b/media/libaah_rtp/aah_tx_packet.cpp @@ -0,0 +1,344 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "LibAAH_RTP" +#include <utils/Log.h> + +#include <arpa/inet.h> +#include <string.h> + +#include <media/stagefright/foundation/ADebug.h> + +#include "aah_tx_packet.h" + +namespace android { + +const int TRTPPacket::kRTPHeaderLen; +const uint32_t TRTPPacket::kTRTPEpochMask; + +TRTPPacket::~TRTPPacket() { + delete mPacket; +} + +/*** TRTP packet properties ***/ + +void TRTPPacket::setSeqNumber(uint16_t val) { + mSeqNumber = val; + + if (mIsPacked) { + const int kTRTPSeqNumberOffset = 2; + uint16_t* buf = reinterpret_cast<uint16_t*>( + mPacket + kTRTPSeqNumberOffset); + *buf = htons(mSeqNumber); + } +} + +uint16_t TRTPPacket::getSeqNumber() const { + return mSeqNumber; +} + +void TRTPPacket::setPTS(int64_t val) { + CHECK(!mIsPacked); + mPTS = val; + mPTSValid = true; +} + +int64_t TRTPPacket::getPTS() const { + return mPTS; +} + +void TRTPPacket::setEpoch(uint32_t val) { + mEpoch = val; + + if (mIsPacked) { + const int kTRTPEpochOffset = 8; + uint32_t* buf = reinterpret_cast<uint32_t*>( + mPacket + kTRTPEpochOffset); + uint32_t val = ntohl(*buf); + val &= ~(kTRTPEpochMask << kTRTPEpochShift); + val |= (mEpoch & kTRTPEpochMask) << kTRTPEpochShift; + *buf = htonl(val); + } +} + +void TRTPPacket::setProgramID(uint16_t val) { + CHECK(!mIsPacked); + mProgramID = val; +} + +void TRTPPacket::setSubstreamID(uint16_t val) { + CHECK(!mIsPacked); + mSubstreamID = val; +} + + +void TRTPPacket::setClockTransform(const LinearTransform& trans) { + CHECK(!mIsPacked); + mClockTranform = trans; + mClockTranformValid = true; +} + +uint8_t* TRTPPacket::getPacket() const { + CHECK(mIsPacked); + return mPacket; +} + +int TRTPPacket::getPacketLen() const { + CHECK(mIsPacked); + return mPacketLen; +} + +void TRTPPacket::setExpireTime(nsecs_t val) { + CHECK(!mIsPacked); + mExpireTime = val; +} + +nsecs_t TRTPPacket::getExpireTime() const { + return mExpireTime; +} + +/*** TRTP audio packet properties ***/ + +void TRTPAudioPacket::setCodecType(TRTPAudioCodecType val) { + 
CHECK(!mIsPacked); + mCodecType = val; +} + +void TRTPAudioPacket::setRandomAccessPoint(bool val) { + CHECK(!mIsPacked); + mRandomAccessPoint = val; +} + +void TRTPAudioPacket::setDropable(bool val) { + CHECK(!mIsPacked); + mDropable = val; +} + +void TRTPAudioPacket::setDiscontinuity(bool val) { + CHECK(!mIsPacked); + mDiscontinuity = val; +} + +void TRTPAudioPacket::setEndOfStream(bool val) { + CHECK(!mIsPacked); + mEndOfStream = val; +} + +void TRTPAudioPacket::setVolume(uint8_t val) { + CHECK(!mIsPacked); + mVolume = val; +} + +void TRTPAudioPacket::setAccessUnitData(const void* data, size_t len) { + CHECK(!mIsPacked); + mAccessUnitData = data; + mAccessUnitLen = len; +} + +void TRTPAudioPacket::setAuxData(const void* data, size_t len) { + CHECK(!mIsPacked); + mAuxData = data; + mAuxDataLen = len; +} + +/*** TRTP control packet properties ***/ + +void TRTPControlPacket::setCommandID(TRTPCommandID val) { + CHECK(!mIsPacked); + mCommandID = val; +} + +/*** TRTP packet serializers ***/ + +void TRTPPacket::writeU8(uint8_t*& buf, uint8_t val) { + *buf = val; + buf++; +} + +void TRTPPacket::writeU16(uint8_t*& buf, uint16_t val) { + *reinterpret_cast<uint16_t*>(buf) = htons(val); + buf += 2; +} + +void TRTPPacket::writeU32(uint8_t*& buf, uint32_t val) { + *reinterpret_cast<uint32_t*>(buf) = htonl(val); + buf += 4; +} + +void TRTPPacket::writeU64(uint8_t*& buf, uint64_t val) { + buf[0] = static_cast<uint8_t>(val >> 56); + buf[1] = static_cast<uint8_t>(val >> 48); + buf[2] = static_cast<uint8_t>(val >> 40); + buf[3] = static_cast<uint8_t>(val >> 32); + buf[4] = static_cast<uint8_t>(val >> 24); + buf[5] = static_cast<uint8_t>(val >> 16); + buf[6] = static_cast<uint8_t>(val >> 8); + buf[7] = static_cast<uint8_t>(val); + buf += 8; +} + +void TRTPPacket::writeTRTPHeader(uint8_t*& buf, + bool isFirstFragment, + int totalPacketLen) { + // RTP header + writeU8(buf, + ((mVersion & 0x03) << 6) | + (static_cast<int>(mPadding) << 5) | + (static_cast<int>(mExtension) << 4) | + 
(mCsrcCount & 0x0F)); + writeU8(buf, + (static_cast<int>(isFirstFragment) << 7) | + (mPayloadType & 0x7F)); + writeU16(buf, mSeqNumber); + if (isFirstFragment && mPTSValid) { + writeU32(buf, mPTS & 0xFFFFFFFF); + } else { + writeU32(buf, 0); + } + writeU32(buf, + ((mEpoch & kTRTPEpochMask) << kTRTPEpochShift) | + ((mProgramID & 0x1F) << 5) | + (mSubstreamID & 0x1F)); + + // TRTP header + writeU8(buf, mTRTPVersion); + writeU8(buf, + ((mTRTPHeaderType & 0x0F) << 4) | + (mClockTranformValid ? 0x02 : 0x00) | + (mPTSValid ? 0x01 : 0x00)); + writeU32(buf, totalPacketLen - kRTPHeaderLen); + if (mPTSValid) { + writeU32(buf, mPTS >> 32); + } + + if (mClockTranformValid) { + writeU64(buf, mClockTranform.a_zero); + writeU32(buf, mClockTranform.a_to_b_numer); + writeU32(buf, mClockTranform.a_to_b_denom); + writeU64(buf, mClockTranform.b_zero); + } +} + +bool TRTPAudioPacket::pack() { + if (mIsPacked) { + return false; + } + + int packetLen = kRTPHeaderLen + + mAuxDataLen + + mAccessUnitLen + + TRTPHeaderLen(); + + // TODO : support multiple fragments + const int kMaxUDPPayloadLen = 65507; + if (packetLen > kMaxUDPPayloadLen) { + return false; + } + + mPacket = new uint8_t[packetLen]; + if (!mPacket) { + return false; + } + + mPacketLen = packetLen; + + uint8_t* cur = mPacket; + bool hasAux = mAuxData && mAuxDataLen; + uint8_t flags = (static_cast<int>(hasAux) << 4) | + (static_cast<int>(mRandomAccessPoint) << 3) | + (static_cast<int>(mDropable) << 2) | + (static_cast<int>(mDiscontinuity) << 1) | + (static_cast<int>(mEndOfStream)); + + writeTRTPHeader(cur, true, packetLen); + writeU8(cur, mCodecType); + writeU8(cur, flags); + writeU8(cur, mVolume); + + if (hasAux) { + writeU32(cur, mAuxDataLen); + memcpy(cur, mAuxData, mAuxDataLen); + cur += mAuxDataLen; + } + + memcpy(cur, mAccessUnitData, mAccessUnitLen); + + mIsPacked = true; + return true; +} + +int TRTPPacket::TRTPHeaderLen() const { + // 6 bytes for version, payload type, flags and length. 
An additional 4 if + // there are upper timestamp bits present and another 24 if there is a clock + // transformation present. + return 6 + + (mClockTranformValid ? 24 : 0) + + (mPTSValid ? 4 : 0); +} + +int TRTPAudioPacket::TRTPHeaderLen() const { + // TRTPPacket::TRTPHeaderLen() for the base TRTPHeader. 3 bytes for audio's + // codec type, flags and volume field. Another 5 bytes if the codec type is + // PCM and we are sending sample rate/channel count. as well as however long + // the aux data (if present) is. + + int pcmParamLength; + switch(mCodecType) { + case kCodecPCMBigEndian: + case kCodecPCMLittleEndian: + pcmParamLength = 5; + break; + + default: + pcmParamLength = 0; + break; + } + + + int auxDataLenField = (NULL != mAuxData) ? sizeof(uint32_t) : 0; + return TRTPPacket::TRTPHeaderLen() + + 3 + + auxDataLenField + + pcmParamLength; +} + +bool TRTPControlPacket::pack() { + if (mIsPacked) { + return false; + } + + // command packets contain a 2-byte command ID + int packetLen = kRTPHeaderLen + + TRTPHeaderLen() + + 2; + + mPacket = new uint8_t[packetLen]; + if (!mPacket) { + return false; + } + + mPacketLen = packetLen; + + uint8_t* cur = mPacket; + + writeTRTPHeader(cur, true, packetLen); + writeU16(cur, mCommandID); + + mIsPacked = true; + return true; +} + +} // namespace android diff --git a/media/libaah_rtp/aah_tx_packet.h b/media/libaah_rtp/aah_tx_packet.h new file mode 100644 index 0000000..7f78ea0 --- /dev/null +++ b/media/libaah_rtp/aah_tx_packet.h @@ -0,0 +1,213 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __AAH_TX_PACKET_H__ +#define __AAH_TX_PACKET_H__ + +#include <media/stagefright/foundation/ABase.h> +#include <utils/LinearTransform.h> +#include <utils/RefBase.h> +#include <utils/Timers.h> + +namespace android { + +class TRTPPacket : public RefBase { + public: + enum TRTPHeaderType { + kHeaderTypeAudio = 1, + kHeaderTypeVideo = 2, + kHeaderTypeSubpicture = 3, + kHeaderTypeControl = 4, + }; + + enum TRTPPayloadFlags { + kFlag_TSTransformPresent = 0x02, + kFlag_TSValid = 0x01, + }; + + protected: + TRTPPacket(TRTPHeaderType headerType) + : mIsPacked(false) + , mVersion(2) + , mPadding(false) + , mExtension(false) + , mCsrcCount(0) + , mPayloadType(100) + , mSeqNumber(0) + , mPTSValid(false) + , mPTS(0) + , mEpoch(0) + , mProgramID(0) + , mSubstreamID(0) + , mClockTranformValid(false) + , mTRTPVersion(1) + , mTRTPLength(0) + , mTRTPHeaderType(headerType) + , mPacket(NULL) + , mPacketLen(0) { } + + public: + virtual ~TRTPPacket(); + + void setSeqNumber(uint16_t val); + uint16_t getSeqNumber() const; + + void setPTS(int64_t val); + int64_t getPTS() const; + + void setEpoch(uint32_t val); + void setProgramID(uint16_t val); + void setSubstreamID(uint16_t val); + void setClockTransform(const LinearTransform& trans); + + uint8_t* getPacket() const; + int getPacketLen() const; + + void setExpireTime(nsecs_t val); + nsecs_t getExpireTime() const; + + virtual bool pack() = 0; + + // mask for the number of bits in a TRTP epoch + static const uint32_t kTRTPEpochMask = (1 << 22) - 1; + static const int kTRTPEpochShift = 10; + + protected: + 
static const int kRTPHeaderLen = 12; + virtual int TRTPHeaderLen() const; + + void writeTRTPHeader(uint8_t*& buf, + bool isFirstFragment, + int totalPacketLen); + + void writeU8(uint8_t*& buf, uint8_t val); + void writeU16(uint8_t*& buf, uint16_t val); + void writeU32(uint8_t*& buf, uint32_t val); + void writeU64(uint8_t*& buf, uint64_t val); + + bool mIsPacked; + + uint8_t mVersion; + bool mPadding; + bool mExtension; + uint8_t mCsrcCount; + uint8_t mPayloadType; + uint16_t mSeqNumber; + bool mPTSValid; + int64_t mPTS; + uint32_t mEpoch; + uint16_t mProgramID; + uint16_t mSubstreamID; + LinearTransform mClockTranform; + bool mClockTranformValid; + uint8_t mTRTPVersion; + uint32_t mTRTPLength; + TRTPHeaderType mTRTPHeaderType; + + uint8_t* mPacket; + int mPacketLen; + + nsecs_t mExpireTime; + + DISALLOW_EVIL_CONSTRUCTORS(TRTPPacket); +}; + +class TRTPAudioPacket : public TRTPPacket { + public: + enum AudioPayloadFlags { + kFlag_AuxLengthPresent = 0x10, + kFlag_RandomAccessPoint = 0x08, + kFlag_Dropable = 0x04, + kFlag_Discontinuity = 0x02, + kFlag_EndOfStream = 0x01, + }; + + TRTPAudioPacket() + : TRTPPacket(kHeaderTypeAudio) + , mCodecType(kCodecInvalid) + , mRandomAccessPoint(false) + , mDropable(false) + , mDiscontinuity(false) + , mEndOfStream(false) + , mVolume(0) + , mAccessUnitData(NULL) + , mAccessUnitLen(0) + , mAuxData(NULL) + , mAuxDataLen(0) { } + + enum TRTPAudioCodecType { + kCodecInvalid = 0, + kCodecPCMBigEndian = 1, + kCodecPCMLittleEndian = 2, + kCodecMPEG1Audio = 3, + kCodecAACAudio = 4, + }; + + void setCodecType(TRTPAudioCodecType val); + void setRandomAccessPoint(bool val); + void setDropable(bool val); + void setDiscontinuity(bool val); + void setEndOfStream(bool val); + void setVolume(uint8_t val); + void setAccessUnitData(const void* data, size_t len); + void setAuxData(const void* data, size_t len); + + virtual bool pack(); + + protected: + virtual int TRTPHeaderLen() const; + + private: + TRTPAudioCodecType mCodecType; + bool 
mRandomAccessPoint; + bool mDropable; + bool mDiscontinuity; + bool mEndOfStream; + uint8_t mVolume; + + const void* mAccessUnitData; + size_t mAccessUnitLen; + const void* mAuxData; + size_t mAuxDataLen; + + DISALLOW_EVIL_CONSTRUCTORS(TRTPAudioPacket); +}; + +class TRTPControlPacket : public TRTPPacket { + public: + TRTPControlPacket() + : TRTPPacket(kHeaderTypeControl) + , mCommandID(kCommandNop) {} + + enum TRTPCommandID { + kCommandNop = 1, + kCommandFlush = 2, + kCommandEOS = 3, + }; + + void setCommandID(TRTPCommandID val); + + virtual bool pack(); + + private: + TRTPCommandID mCommandID; + + DISALLOW_EVIL_CONSTRUCTORS(TRTPControlPacket); +}; + +} // namespace android + +#endif // __AAH_TX_PLAYER_H__ diff --git a/media/libaah_rtp/aah_tx_player.cpp b/media/libaah_rtp/aah_tx_player.cpp new file mode 100644 index 0000000..974805b --- /dev/null +++ b/media/libaah_rtp/aah_tx_player.cpp @@ -0,0 +1,1177 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "LibAAH_RTP" +#include <utils/Log.h> + +#define __STDC_FORMAT_MACROS +#include <inttypes.h> +#include <netdb.h> +#include <netinet/ip.h> + +#include <common_time/cc_helper.h> +#include <media/IMediaPlayer.h> +#include <media/stagefright/foundation/ADebug.h> +#include <media/stagefright/foundation/AMessage.h> +#include <media/stagefright/FileSource.h> +#include <media/stagefright/MediaBuffer.h> +#include <media/stagefright/MediaDefs.h> +#include <media/stagefright/MetaData.h> +#include <utils/Timers.h> + +#include "aah_tx_packet.h" +#include "aah_tx_player.h" + +namespace android { + +static int64_t kLowWaterMarkUs = 2000000ll; // 2secs +static int64_t kHighWaterMarkUs = 10000000ll; // 10secs +static const size_t kLowWaterMarkBytes = 40000; +static const size_t kHighWaterMarkBytes = 200000; + +// When we start up, how much lead time should we put on the first access unit? +static const int64_t kAAHStartupLeadTimeUs = 300000LL; + +// How much time do we attempt to lead the clock by in steady state? +static const int64_t kAAHBufferTimeUs = 1000000LL; + +// how long do we keep data in our retransmit buffer after sending it. 
+const int64_t AAH_TXPlayer::kAAHRetryKeepAroundTimeNs = + kAAHBufferTimeUs * 1100; + +sp<MediaPlayerBase> createAAH_TXPlayer() { + sp<MediaPlayerBase> ret = new AAH_TXPlayer(); + return ret; +} + +template <typename T> static T clamp(T val, T min, T max) { + if (val < min) { + return min; + } else if (val > max) { + return max; + } else { + return val; + } +} + +struct AAH_TXEvent : public TimedEventQueue::Event { + AAH_TXEvent(AAH_TXPlayer *player, + void (AAH_TXPlayer::*method)()) : mPlayer(player) + , mMethod(method) {} + + protected: + virtual ~AAH_TXEvent() {} + + virtual void fire(TimedEventQueue *queue, int64_t /* now_us */) { + (mPlayer->*mMethod)(); + } + + private: + AAH_TXPlayer *mPlayer; + void (AAH_TXPlayer::*mMethod)(); + + AAH_TXEvent(const AAH_TXEvent &); + AAH_TXEvent& operator=(const AAH_TXEvent &); +}; + +AAH_TXPlayer::AAH_TXPlayer() + : mQueueStarted(false) + , mFlags(0) + , mExtractorFlags(0) { + DataSource::RegisterDefaultSniffers(); + + mBufferingEvent = new AAH_TXEvent(this, &AAH_TXPlayer::onBufferingUpdate); + mBufferingEventPending = false; + + mPumpAudioEvent = new AAH_TXEvent(this, &AAH_TXPlayer::onPumpAudio); + mPumpAudioEventPending = false; + + mAudioCodecData = NULL; + + reset_l(); +} + +AAH_TXPlayer::~AAH_TXPlayer() { + if (mQueueStarted) { + mQueue.stop(); + } + + reset_l(); +} + +void AAH_TXPlayer::cancelPlayerEvents(bool keepBufferingGoing) { + if (!keepBufferingGoing) { + mQueue.cancelEvent(mBufferingEvent->eventID()); + mBufferingEventPending = false; + + mQueue.cancelEvent(mPumpAudioEvent->eventID()); + mPumpAudioEventPending = false; + } +} + +status_t AAH_TXPlayer::initCheck() { + // Check for the presense of the common time service by attempting to query + // for CommonTime's frequency. If we get an error back, we cannot talk to + // the service at all and should abort now. + status_t res; + uint64_t freq; + res = mCCHelper.getCommonFreq(&freq); + if (OK != res) { + ALOGE("Failed to connect to common time service! 
(res %d)", res); + return res; + } + + return OK; +} + +status_t AAH_TXPlayer::setDataSource( + const char *url, + const KeyedVector<String8, String8> *headers) { + Mutex::Autolock autoLock(mLock); + return setDataSource_l(url, headers); +} + +status_t AAH_TXPlayer::setDataSource_l( + const char *url, + const KeyedVector<String8, String8> *headers) { + reset_l(); + + mUri.setTo(url); + + if (headers) { + mUriHeaders = *headers; + + ssize_t index = mUriHeaders.indexOfKey(String8("x-hide-urls-from-log")); + if (index >= 0) { + // Browser is in "incognito" mode, suppress logging URLs. + + // This isn't something that should be passed to the server. + mUriHeaders.removeItemsAt(index); + + mFlags |= INCOGNITO; + } + } + + // The URL may optionally contain a "#" character followed by a Skyjam + // cookie. Ideally the cookie header should just be passed in the headers + // argument, but the Java API for supplying headers is apparently not yet + // exposed in the SDK used by application developers. 
+ const char kSkyjamCookieDelimiter = '#'; + char* skyjamCookie = strrchr(mUri.string(), kSkyjamCookieDelimiter); + if (skyjamCookie) { + skyjamCookie++; + mUriHeaders.add(String8("Cookie"), String8(skyjamCookie)); + mUri = String8(mUri.string(), skyjamCookie - mUri.string()); + } + + return OK; +} + +status_t AAH_TXPlayer::setDataSource(int fd, int64_t offset, int64_t length) { + Mutex::Autolock autoLock(mLock); + + reset_l(); + + sp<DataSource> dataSource = new FileSource(dup(fd), offset, length); + + status_t err = dataSource->initCheck(); + + if (err != OK) { + return err; + } + + mFileSource = dataSource; + + sp<MediaExtractor> extractor = MediaExtractor::Create(dataSource); + + if (extractor == NULL) { + return UNKNOWN_ERROR; + } + + return setDataSource_l(extractor); +} + +status_t AAH_TXPlayer::setVideoSurface(const sp<Surface>& surface) { + return OK; +} + +status_t AAH_TXPlayer::setVideoSurfaceTexture( + const sp<ISurfaceTexture>& surfaceTexture) { + return OK; +} + +status_t AAH_TXPlayer::prepare() { + return INVALID_OPERATION; +} + +status_t AAH_TXPlayer::prepareAsync() { + Mutex::Autolock autoLock(mLock); + + return prepareAsync_l(); +} + +status_t AAH_TXPlayer::prepareAsync_l() { + if (mFlags & PREPARING) { + return UNKNOWN_ERROR; // async prepare already pending + } + + mAAH_Sender = AAH_TXSender::GetInstance(); + if (mAAH_Sender == NULL) { + return NO_MEMORY; + } + + if (!mQueueStarted) { + mQueue.start(); + mQueueStarted = true; + } + + mFlags |= PREPARING; + mAsyncPrepareEvent = new AAH_TXEvent( + this, &AAH_TXPlayer::onPrepareAsyncEvent); + + mQueue.postEvent(mAsyncPrepareEvent); + + return OK; +} + +status_t AAH_TXPlayer::finishSetDataSource_l() { + sp<DataSource> dataSource; + + if (!strncasecmp("http://", mUri.string(), 7) || + !strncasecmp("https://", mUri.string(), 8)) { + + mConnectingDataSource = HTTPBase::Create( + (mFlags & INCOGNITO) + ? 
HTTPBase::kFlagIncognito + : 0); + + mLock.unlock(); + status_t err = mConnectingDataSource->connect(mUri, &mUriHeaders); + mLock.lock(); + + if (err != OK) { + mConnectingDataSource.clear(); + + ALOGI("mConnectingDataSource->connect() returned %d", err); + return err; + } + + mCachedSource = new NuCachedSource2(mConnectingDataSource); + mConnectingDataSource.clear(); + + dataSource = mCachedSource; + + // We're going to prefill the cache before trying to instantiate + // the extractor below, as the latter is an operation that otherwise + // could block on the datasource for a significant amount of time. + // During that time we'd be unable to abort the preparation phase + // without this prefill. + + mLock.unlock(); + + for (;;) { + status_t finalStatus; + size_t cachedDataRemaining = + mCachedSource->approxDataRemaining(&finalStatus); + + if (finalStatus != OK || + cachedDataRemaining >= kHighWaterMarkBytes || + (mFlags & PREPARE_CANCELLED)) { + break; + } + + usleep(200000); + } + + mLock.lock(); + + if (mFlags & PREPARE_CANCELLED) { + ALOGI("Prepare cancelled while waiting for initial cache fill."); + return UNKNOWN_ERROR; + } + } else { + dataSource = DataSource::CreateFromURI(mUri.string(), &mUriHeaders); + } + + if (dataSource == NULL) { + return UNKNOWN_ERROR; + } + + sp<MediaExtractor> extractor = MediaExtractor::Create(dataSource); + + if (extractor == NULL) { + return UNKNOWN_ERROR; + } + + return setDataSource_l(extractor); +} + +status_t AAH_TXPlayer::setDataSource_l(const sp<MediaExtractor> &extractor) { + // Attempt to approximate overall stream bitrate by summing all + // tracks' individual bitrates, if not all of them advertise bitrate, + // we have to fail. 
+ + int64_t totalBitRate = 0; + + for (size_t i = 0; i < extractor->countTracks(); ++i) { + sp<MetaData> meta = extractor->getTrackMetaData(i); + + int32_t bitrate; + if (!meta->findInt32(kKeyBitRate, &bitrate)) { + totalBitRate = -1; + break; + } + + totalBitRate += bitrate; + } + + mBitrate = totalBitRate; + + ALOGV("mBitrate = %lld bits/sec", mBitrate); + + bool haveAudio = false; + for (size_t i = 0; i < extractor->countTracks(); ++i) { + sp<MetaData> meta = extractor->getTrackMetaData(i); + + const char *mime; + CHECK(meta->findCString(kKeyMIMEType, &mime)); + + if (!strncasecmp(mime, "audio/", 6)) { + mAudioSource = extractor->getTrack(i); + CHECK(mAudioSource != NULL); + haveAudio = true; + break; + } + } + + if (!haveAudio) { + return UNKNOWN_ERROR; + } + + mExtractorFlags = extractor->flags(); + + return OK; +} + +void AAH_TXPlayer::abortPrepare(status_t err) { + CHECK(err != OK); + + notifyListener_l(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err); + + mPrepareResult = err; + mFlags &= ~(PREPARING|PREPARE_CANCELLED|PREPARING_CONNECTED); + mPreparedCondition.broadcast(); +} + +void AAH_TXPlayer::onPrepareAsyncEvent() { + Mutex::Autolock autoLock(mLock); + + if (mFlags & PREPARE_CANCELLED) { + ALOGI("prepare was cancelled before doing anything"); + abortPrepare(UNKNOWN_ERROR); + return; + } + + if (mUri.size() > 0) { + status_t err = finishSetDataSource_l(); + + if (err != OK) { + abortPrepare(err); + return; + } + } + + mAudioFormat = mAudioSource->getFormat(); + if (!mAudioFormat->findInt64(kKeyDuration, &mDurationUs)) + mDurationUs = 1; + + const char* mime_type = NULL; + if (!mAudioFormat->findCString(kKeyMIMEType, &mime_type)) { + ALOGE("Failed to find audio substream MIME type during prepare."); + abortPrepare(BAD_VALUE); + return; + } + + if (!strcmp(mime_type, MEDIA_MIMETYPE_AUDIO_MPEG)) { + mAudioCodec = TRTPAudioPacket::kCodecMPEG1Audio; + } else + if (!strcmp(mime_type, MEDIA_MIMETYPE_AUDIO_AAC)) { + mAudioCodec = TRTPAudioPacket::kCodecAACAudio; + + 
uint32_t type; + int32_t sample_rate; + int32_t channel_count; + const void* esds_data; + size_t esds_len; + + if (!mAudioFormat->findInt32(kKeySampleRate, &sample_rate)) { + ALOGE("Failed to find sample rate for AAC substream."); + abortPrepare(BAD_VALUE); + return; + } + + if (!mAudioFormat->findInt32(kKeyChannelCount, &channel_count)) { + ALOGE("Failed to find channel count for AAC substream."); + abortPrepare(BAD_VALUE); + return; + } + + if (!mAudioFormat->findData(kKeyESDS, &type, &esds_data, &esds_len)) { + ALOGE("Failed to find codec init data for AAC substream."); + abortPrepare(BAD_VALUE); + return; + } + + CHECK(NULL == mAudioCodecData); + mAudioCodecDataSize = esds_len + + sizeof(sample_rate) + + sizeof(channel_count); + mAudioCodecData = new uint8_t[mAudioCodecDataSize]; + if (NULL == mAudioCodecData) { + ALOGE("Failed to allocate %u bytes for AAC substream codec aux" + " data.", mAudioCodecDataSize); + mAudioCodecDataSize = 0; + abortPrepare(BAD_VALUE); + return; + } + + uint8_t* tmp = mAudioCodecData; + tmp[0] = static_cast<uint8_t>((sample_rate >> 24) & 0xFF); + tmp[1] = static_cast<uint8_t>((sample_rate >> 16) & 0xFF); + tmp[2] = static_cast<uint8_t>((sample_rate >> 8) & 0xFF); + tmp[3] = static_cast<uint8_t>((sample_rate ) & 0xFF); + tmp[4] = static_cast<uint8_t>((channel_count >> 24) & 0xFF); + tmp[5] = static_cast<uint8_t>((channel_count >> 16) & 0xFF); + tmp[6] = static_cast<uint8_t>((channel_count >> 8) & 0xFF); + tmp[7] = static_cast<uint8_t>((channel_count ) & 0xFF); + + memcpy(tmp + 8, esds_data, esds_len); + } else { + ALOGE("Unsupported MIME type \"%s\" in audio substream", mime_type); + abortPrepare(BAD_VALUE); + return; + } + + status_t err = mAudioSource->start(); + if (err != OK) { + ALOGI("failed to start audio source, err=%d", err); + abortPrepare(err); + return; + } + + mFlags |= PREPARING_CONNECTED; + + if (mCachedSource != NULL) { + postBufferingEvent_l(); + } else { + finishAsyncPrepare_l(); + } +} + +void 
AAH_TXPlayer::finishAsyncPrepare_l() { + notifyListener_l(MEDIA_PREPARED); + + mPrepareResult = OK; + mFlags &= ~(PREPARING|PREPARE_CANCELLED|PREPARING_CONNECTED); + mFlags |= PREPARED; + mPreparedCondition.broadcast(); +} + +status_t AAH_TXPlayer::start() { + Mutex::Autolock autoLock(mLock); + + mFlags &= ~CACHE_UNDERRUN; + + return play_l(); +} + +status_t AAH_TXPlayer::play_l() { + if (mFlags & PLAYING) { + return OK; + } + + if (!(mFlags & PREPARED)) { + return INVALID_OPERATION; + } + + { + Mutex::Autolock lock(mEndpointLock); + if (!mEndpointValid) { + return INVALID_OPERATION; + } + if (!mEndpointRegistered) { + mProgramID = mAAH_Sender->registerEndpoint(mEndpoint); + mEndpointRegistered = true; + } + } + + mFlags |= PLAYING; + + updateClockTransform_l(false); + + postPumpAudioEvent_l(-1); + + return OK; +} + +status_t AAH_TXPlayer::stop() { + status_t ret = pause(); + sendEOS_l(); + return ret; +} + +status_t AAH_TXPlayer::pause() { + Mutex::Autolock autoLock(mLock); + + mFlags &= ~CACHE_UNDERRUN; + + return pause_l(); +} + +status_t AAH_TXPlayer::pause_l(bool doClockUpdate) { + if (!(mFlags & PLAYING)) { + return OK; + } + + cancelPlayerEvents(true /* keepBufferingGoing */); + + mFlags &= ~PLAYING; + + if (doClockUpdate) { + updateClockTransform_l(true); + } + + return OK; +} + +void AAH_TXPlayer::updateClockTransform_l(bool pause) { + // record the new pause status so that onPumpAudio knows what rate to apply + // when it initializes the transform + mPlayRateIsPaused = pause; + + // if we haven't yet established a valid clock transform, then we can't + // do anything here + if (!mCurrentClockTransformValid) { + return; + } + + // sample the current common time + int64_t commonTimeNow; + if (OK != mCCHelper.getCommonTime(&commonTimeNow)) { + ALOGE("updateClockTransform_l get common time failed"); + mCurrentClockTransformValid = false; + return; + } + + // convert the current common time to media time using the old + // transform + int64_t mediaTimeNow; + 
if (!mCurrentClockTransform.doReverseTransform( + commonTimeNow, &mediaTimeNow)) { + ALOGE("updateClockTransform_l reverse transform failed"); + mCurrentClockTransformValid = false; + return; + } + + // calculate a new transform that preserves the old transform's + // result for the current time + mCurrentClockTransform.a_zero = mediaTimeNow; + mCurrentClockTransform.b_zero = commonTimeNow; + mCurrentClockTransform.a_to_b_numer = 1; + mCurrentClockTransform.a_to_b_denom = pause ? 0 : 1; + + // send a packet announcing the new transform + sp<TRTPControlPacket> packet = new TRTPControlPacket(); + packet->setClockTransform(mCurrentClockTransform); + packet->setCommandID(TRTPControlPacket::kCommandNop); + queuePacketToSender_l(packet); +} + +void AAH_TXPlayer::sendEOS_l() { + sp<TRTPControlPacket> packet = new TRTPControlPacket(); + packet->setCommandID(TRTPControlPacket::kCommandEOS); + queuePacketToSender_l(packet); +} + +bool AAH_TXPlayer::isPlaying() { + return (mFlags & PLAYING) || (mFlags & CACHE_UNDERRUN); +} + +status_t AAH_TXPlayer::seekTo(int msec) { + if (mExtractorFlags & MediaExtractor::CAN_SEEK) { + Mutex::Autolock autoLock(mLock); + return seekTo_l(static_cast<int64_t>(msec) * 1000); + } + + notifyListener_l(MEDIA_SEEK_COMPLETE); + return OK; +} + +status_t AAH_TXPlayer::seekTo_l(int64_t timeUs) { + mIsSeeking = true; + mSeekTimeUs = timeUs; + + mCurrentClockTransformValid = false; + mLastQueuedMediaTimePTSValid = false; + + // send a flush command packet + sp<TRTPControlPacket> packet = new TRTPControlPacket(); + packet->setCommandID(TRTPControlPacket::kCommandFlush); + queuePacketToSender_l(packet); + + return OK; +} + +status_t AAH_TXPlayer::getCurrentPosition(int *msec) { + if (!msec) { + return BAD_VALUE; + } + + Mutex::Autolock lock(mLock); + + int position; + + if (mIsSeeking) { + position = mSeekTimeUs / 1000; + } else if (mCurrentClockTransformValid) { + // sample the current common time + int64_t commonTimeNow; + if (OK != 
mCCHelper.getCommonTime(&commonTimeNow)) { + ALOGE("getCurrentPosition get common time failed"); + return INVALID_OPERATION; + } + + int64_t mediaTimeNow; + if (!mCurrentClockTransform.doReverseTransform(commonTimeNow, + &mediaTimeNow)) { + ALOGE("getCurrentPosition reverse transform failed"); + return INVALID_OPERATION; + } + + position = static_cast<int>(mediaTimeNow / 1000); + } else { + position = 0; + } + + int duration; + if (getDuration_l(&duration) == OK) { + *msec = clamp(position, 0, duration); + } else { + *msec = (position >= 0) ? position : 0; + } + + return OK; +} + +status_t AAH_TXPlayer::getDuration(int* msec) { + if (!msec) { + return BAD_VALUE; + } + + Mutex::Autolock lock(mLock); + + return getDuration_l(msec); +} + +status_t AAH_TXPlayer::getDuration_l(int* msec) { + if (mDurationUs < 0) { + return UNKNOWN_ERROR; + } + + *msec = (mDurationUs + 500) / 1000; + + return OK; +} + +status_t AAH_TXPlayer::reset() { + Mutex::Autolock autoLock(mLock); + reset_l(); + return OK; +} + +void AAH_TXPlayer::reset_l() { + if (mFlags & PREPARING) { + mFlags |= PREPARE_CANCELLED; + if (mConnectingDataSource != NULL) { + ALOGI("interrupting the connection process"); + mConnectingDataSource->disconnect(); + } + + if (mFlags & PREPARING_CONNECTED) { + // We are basically done preparing, we're just buffering + // enough data to start playback, we can safely interrupt that. 
+ finishAsyncPrepare_l(); + } + } + + while (mFlags & PREPARING) { + mPreparedCondition.wait(mLock); + } + + cancelPlayerEvents(); + + sendEOS_l(); + + mCachedSource.clear(); + + if (mAudioSource != NULL) { + mAudioSource->stop(); + } + mAudioSource.clear(); + mAudioCodec = TRTPAudioPacket::kCodecInvalid; + mAudioFormat = NULL; + delete[] mAudioCodecData; + mAudioCodecData = NULL; + mAudioCodecDataSize = 0; + + mFlags = 0; + mExtractorFlags = 0; + + mDurationUs = -1; + mIsSeeking = false; + mSeekTimeUs = 0; + + mUri.setTo(""); + mUriHeaders.clear(); + + mFileSource.clear(); + + mBitrate = -1; + + { + Mutex::Autolock lock(mEndpointLock); + if (mAAH_Sender != NULL && mEndpointRegistered) { + mAAH_Sender->unregisterEndpoint(mEndpoint); + } + mEndpointRegistered = false; + mEndpointValid = false; + } + + mProgramID = 0; + + mAAH_Sender.clear(); + mLastQueuedMediaTimePTSValid = false; + mCurrentClockTransformValid = false; + mPlayRateIsPaused = false; + + mTRTPVolume = 255; +} + +status_t AAH_TXPlayer::setLooping(int loop) { + return OK; +} + +player_type AAH_TXPlayer::playerType() { + return AAH_TX_PLAYER; +} + +status_t AAH_TXPlayer::setParameter(int key, const Parcel &request) { + return ERROR_UNSUPPORTED; +} + +status_t AAH_TXPlayer::getParameter(int key, Parcel *reply) { + return ERROR_UNSUPPORTED; +} + +status_t AAH_TXPlayer::invoke(const Parcel& request, Parcel *reply) { + return INVALID_OPERATION; +} + +status_t AAH_TXPlayer::getMetadata(const media::Metadata::Filter& ids, + Parcel* records) { + using media::Metadata; + + Metadata metadata(records); + + metadata.appendBool(Metadata::kPauseAvailable, true); + metadata.appendBool(Metadata::kSeekBackwardAvailable, false); + metadata.appendBool(Metadata::kSeekForwardAvailable, false); + metadata.appendBool(Metadata::kSeekAvailable, false); + + return OK; +} + +status_t AAH_TXPlayer::setVolume(float leftVolume, float rightVolume) { + if (leftVolume != rightVolume) { + ALOGE("%s does not support per channel volume: 
%f, %f", + __PRETTY_FUNCTION__, leftVolume, rightVolume); + } + + float volume = clamp(leftVolume, 0.0f, 1.0f); + + Mutex::Autolock lock(mLock); + mTRTPVolume = static_cast<uint8_t>((leftVolume * 255.0) + 0.5); + + return OK; +} + +status_t AAH_TXPlayer::setAudioStreamType(audio_stream_type_t streamType) { + return OK; +} + +status_t AAH_TXPlayer::setRetransmitEndpoint( + const struct sockaddr_in* endpoint) { + Mutex::Autolock lock(mLock); + + if (NULL == endpoint) + return BAD_VALUE; + + // Once the endpoint has been registered, it may not be changed. + if (mEndpointRegistered) + return INVALID_OPERATION; + + mEndpoint.addr = endpoint->sin_addr.s_addr; + mEndpoint.port = endpoint->sin_port; + mEndpointValid = true; + + return OK; +} + +void AAH_TXPlayer::notifyListener_l(int msg, int ext1, int ext2) { + sendEvent(msg, ext1, ext2); +} + +bool AAH_TXPlayer::getBitrate_l(int64_t *bitrate) { + off64_t size; + if (mDurationUs >= 0 && + mCachedSource != NULL && + mCachedSource->getSize(&size) == OK) { + *bitrate = size * 8000000ll / mDurationUs; // in bits/sec + return true; + } + + if (mBitrate >= 0) { + *bitrate = mBitrate; + return true; + } + + *bitrate = 0; + + return false; +} + +// Returns true iff cached duration is available/applicable. 
+bool AAH_TXPlayer::getCachedDuration_l(int64_t *durationUs, bool *eos) { + int64_t bitrate; + + if (mCachedSource != NULL && getBitrate_l(&bitrate)) { + status_t finalStatus; + size_t cachedDataRemaining = mCachedSource->approxDataRemaining( + &finalStatus); + *durationUs = cachedDataRemaining * 8000000ll / bitrate; + *eos = (finalStatus != OK); + return true; + } + + return false; +} + +void AAH_TXPlayer::ensureCacheIsFetching_l() { + if (mCachedSource != NULL) { + mCachedSource->resumeFetchingIfNecessary(); + } +} + +void AAH_TXPlayer::postBufferingEvent_l() { + if (mBufferingEventPending) { + return; + } + mBufferingEventPending = true; + mQueue.postEventWithDelay(mBufferingEvent, 1000000ll); +} + +void AAH_TXPlayer::postPumpAudioEvent_l(int64_t delayUs) { + if (mPumpAudioEventPending) { + return; + } + mPumpAudioEventPending = true; + mQueue.postEventWithDelay(mPumpAudioEvent, delayUs < 0 ? 10000 : delayUs); +} + +void AAH_TXPlayer::onBufferingUpdate() { + Mutex::Autolock autoLock(mLock); + if (!mBufferingEventPending) { + return; + } + mBufferingEventPending = false; + + if (mCachedSource != NULL) { + status_t finalStatus; + size_t cachedDataRemaining = mCachedSource->approxDataRemaining( + &finalStatus); + bool eos = (finalStatus != OK); + + if (eos) { + if (finalStatus == ERROR_END_OF_STREAM) { + notifyListener_l(MEDIA_BUFFERING_UPDATE, 100); + } + if (mFlags & PREPARING) { + ALOGV("cache has reached EOS, prepare is done."); + finishAsyncPrepare_l(); + } + } else { + int64_t bitrate; + if (getBitrate_l(&bitrate)) { + size_t cachedSize = mCachedSource->cachedSize(); + int64_t cachedDurationUs = cachedSize * 8000000ll / bitrate; + + int percentage = (100.0 * (double) cachedDurationUs) + / mDurationUs; + if (percentage > 100) { + percentage = 100; + } + + notifyListener_l(MEDIA_BUFFERING_UPDATE, percentage); + } else { + // We don't know the bitrate of the stream, use absolute size + // limits to maintain the cache. 
+ + if ((mFlags & PLAYING) && + !eos && + (cachedDataRemaining < kLowWaterMarkBytes)) { + ALOGI("cache is running low (< %d) , pausing.", + kLowWaterMarkBytes); + mFlags |= CACHE_UNDERRUN; + pause_l(); + ensureCacheIsFetching_l(); + notifyListener_l(MEDIA_INFO, MEDIA_INFO_BUFFERING_START); + } else if (eos || cachedDataRemaining > kHighWaterMarkBytes) { + if (mFlags & CACHE_UNDERRUN) { + ALOGI("cache has filled up (> %d), resuming.", + kHighWaterMarkBytes); + mFlags &= ~CACHE_UNDERRUN; + play_l(); + notifyListener_l(MEDIA_INFO, MEDIA_INFO_BUFFERING_END); + } else if (mFlags & PREPARING) { + ALOGV("cache has filled up (> %d), prepare is done", + kHighWaterMarkBytes); + finishAsyncPrepare_l(); + } + } + } + } + } + + int64_t cachedDurationUs; + bool eos; + if (getCachedDuration_l(&cachedDurationUs, &eos)) { + ALOGV("cachedDurationUs = %.2f secs, eos=%d", + cachedDurationUs / 1E6, eos); + + if ((mFlags & PLAYING) && + !eos && + (cachedDurationUs < kLowWaterMarkUs)) { + ALOGI("cache is running low (%.2f secs) , pausing.", + cachedDurationUs / 1E6); + mFlags |= CACHE_UNDERRUN; + pause_l(); + ensureCacheIsFetching_l(); + notifyListener_l(MEDIA_INFO, MEDIA_INFO_BUFFERING_START); + } else if (eos || cachedDurationUs > kHighWaterMarkUs) { + if (mFlags & CACHE_UNDERRUN) { + ALOGI("cache has filled up (%.2f secs), resuming.", + cachedDurationUs / 1E6); + mFlags &= ~CACHE_UNDERRUN; + play_l(); + notifyListener_l(MEDIA_INFO, MEDIA_INFO_BUFFERING_END); + } else if (mFlags & PREPARING) { + ALOGV("cache has filled up (%.2f secs), prepare is done", + cachedDurationUs / 1E6); + finishAsyncPrepare_l(); + } + } + } + + postBufferingEvent_l(); +} + +void AAH_TXPlayer::onPumpAudio() { + while (true) { + Mutex::Autolock autoLock(mLock); + // If this flag is clear, its because someone has externally canceled + // this pump operation (probably because we a resetting/shutting down). + // Get out immediately, do not reschedule ourselves. 
+        if (!mPumpAudioEventPending) {
+            return;
+        }
+
+        // Start by checking if there is still work to be done.  If we have
+        // never queued a payload (so we don't know what the last queued PTS is)
+        // or we have never established a MediaTime->CommonTime transformation,
+        // then we have work to do (one time through this loop should establish
+        // both).  Otherwise, we want to keep a fixed amt of presentation time
+        // worth of data buffered.  If we cannot get common time (service is
+        // unavailable, or common time is undefined) then we don't have a lot
+        // of good options here.  For now, signal an error up to the app level
+        // and shut down the transmission pump.
+        int64_t commonTimeNow;
+        if (OK != mCCHelper.getCommonTime(&commonTimeNow)) {
+            // Failed to get common time; either the service is down or common
+            // time is not synced.  Raise an error and shutdown the player.
+            ALOGE("*** Cannot pump audio, unable to fetch common time."
+                  "  Shutting down.");
+            notifyListener_l(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, UNKNOWN_ERROR);
+            mPumpAudioEventPending = false;
+            break;
+        }
+
+        if (mCurrentClockTransformValid && mLastQueuedMediaTimePTSValid) {
+            int64_t mediaTimeNow;
+            bool conversionResult = mCurrentClockTransform.doReverseTransform(
+                    commonTimeNow,
+                    &mediaTimeNow);
+            CHECK(conversionResult);
+
+            if ((mediaTimeNow +
+                 kAAHBufferTimeUs -
+                 mLastQueuedMediaTimePTS) <= 0) {
+                break;
+            }
+        }
+
+        MediaSource::ReadOptions options;
+        if (mIsSeeking) {
+            options.setSeekTo(mSeekTimeUs);
+        }
+
+        MediaBuffer* mediaBuffer;
+        status_t err = mAudioSource->read(&mediaBuffer, &options);
+        if (err != NO_ERROR) {
+            if (err == ERROR_END_OF_STREAM) {
+                ALOGI("*** %s reached end of stream", __PRETTY_FUNCTION__);
+                notifyListener_l(MEDIA_BUFFERING_UPDATE, 100);
+                notifyListener_l(MEDIA_PLAYBACK_COMPLETE);
+                pause_l(false);
+                sendEOS_l();
+            } else {
+                ALOGE("*** %s read failed err=%d", __PRETTY_FUNCTION__, err);
+            }
+            return;
+        }
+
+        if (mIsSeeking) {
+            mIsSeeking = false;
+            notifyListener_l(MEDIA_SEEK_COMPLETE);
+        }
+
+        uint8_t* data = (static_cast<uint8_t*>(mediaBuffer->data()) +
+                         mediaBuffer->range_offset());
+        ALOGV("*** %s got media buffer data=[%02hhx %02hhx %02hhx %02hhx]"
+              " offset=%d length=%d", __PRETTY_FUNCTION__,
+              data[0], data[1], data[2], data[3],
+              mediaBuffer->range_offset(), mediaBuffer->range_length());
+
+        int64_t mediaTimeUs;
+        CHECK(mediaBuffer->meta_data()->findInt64(kKeyTime, &mediaTimeUs));
+        ALOGV("*** timeUs=%lld", mediaTimeUs);
+
+        if (!mCurrentClockTransformValid) {
+            if (OK == mCCHelper.getCommonTime(&commonTimeNow)) {
+                mCurrentClockTransform.a_zero = mediaTimeUs;
+                mCurrentClockTransform.b_zero = commonTimeNow +
+                                                kAAHStartupLeadTimeUs;
+                mCurrentClockTransform.a_to_b_numer = 1;
+                mCurrentClockTransform.a_to_b_denom = mPlayRateIsPaused ? 0 : 1;
+                mCurrentClockTransformValid = true;
+            } else {
+                // Failed to get common time; either the service is down or
+                // common time is not synced.  Raise an error and shutdown the
+                // player.
+                ALOGE("*** Cannot begin transmission, unable to fetch common"
+                      " time. Dropping sample with pts=%lld", mediaTimeUs);
+                notifyListener_l(MEDIA_ERROR,
+                                 MEDIA_ERROR_UNKNOWN,
+                                 UNKNOWN_ERROR);
+                mPumpAudioEventPending = false;
+                break;
+            }
+        }
+
+        ALOGV("*** transmitting packet with pts=%lld", mediaTimeUs);
+
+        sp<TRTPAudioPacket> packet = new TRTPAudioPacket();
+        packet->setPTS(mediaTimeUs);
+        packet->setSubstreamID(1);
+
+        packet->setCodecType(mAudioCodec);
+        packet->setVolume(mTRTPVolume);
+        // TODO : introduce a throttle for this so we can control the
+        // frequency with which transforms get sent.
+        packet->setClockTransform(mCurrentClockTransform);
+        packet->setAccessUnitData(data, mediaBuffer->range_length());
+
+        // TODO : while it's pretty much universally true that audio ES
+        // payloads are all RAPs across all codecs, it might be a good idea to
+        // throttle the frequency with which we send codec out of band data to
+        // the RXers.
+        // If/when we do, we need to flag only those payloads which have
+        // required out of band data attached to them as RAPs.
+        packet->setRandomAccessPoint(true);
+
+        if (mAudioCodecData && mAudioCodecDataSize) {
+            packet->setAuxData(mAudioCodecData, mAudioCodecDataSize);
+        }
+
+        queuePacketToSender_l(packet);
+        mediaBuffer->release();
+
+        mLastQueuedMediaTimePTSValid = true;
+        mLastQueuedMediaTimePTS = mediaTimeUs;
+    }
+
+    { // Explicit scope for the autolock pattern.
+        Mutex::Autolock autoLock(mLock);
+
+        // If someone externally has cleared this flag, it's because we should
+        // be shutting down.  Do not reschedule ourselves.
+        if (!mPumpAudioEventPending) {
+            return;
+        }
+
+        // Looks like no one canceled us explicitly.  Clear our flag and post a
+        // new event to ourselves.
+        mPumpAudioEventPending = false;
+        postPumpAudioEvent_l(10000);
+    }
+}
+
+void AAH_TXPlayer::queuePacketToSender_l(const sp<TRTPPacket>& packet) {
+    if (mAAH_Sender == NULL) {
+        return;
+    }
+
+    sp<AMessage> message = new AMessage(AAH_TXSender::kWhatSendPacket,
+                                        mAAH_Sender->handlerID());
+
+    {
+        Mutex::Autolock lock(mEndpointLock);
+        if (!mEndpointValid) {
+            return;
+        }
+
+        message->setInt32(AAH_TXSender::kSendPacketIPAddr, mEndpoint.addr);
+        message->setInt32(AAH_TXSender::kSendPacketPort, mEndpoint.port);
+    }
+
+    packet->setProgramID(mProgramID);
+    packet->setExpireTime(systemTime() + kAAHRetryKeepAroundTimeNs);
+    packet->pack();
+
+    message->setObject(AAH_TXSender::kSendPacketTRTPPacket, packet);
+
+    message->post();
+}
+
+}  // namespace android
diff --git a/media/libaah_rtp/aah_tx_player.h b/media/libaah_rtp/aah_tx_player.h
new file mode 100644
index 0000000..2e4b1f7
--- /dev/null
+++ b/media/libaah_rtp/aah_tx_player.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __AAH_TX_PLAYER_H__ +#define __AAH_TX_PLAYER_H__ + +#include <common_time/cc_helper.h> +#include <libstagefright/include/HTTPBase.h> +#include <libstagefright/include/NuCachedSource2.h> +#include <libstagefright/include/TimedEventQueue.h> +#include <media/MediaPlayerInterface.h> +#include <media/stagefright/MediaExtractor.h> +#include <media/stagefright/MediaSource.h> +#include <utils/LinearTransform.h> +#include <utils/String8.h> +#include <utils/threads.h> + +#include "aah_tx_sender.h" + +namespace android { + +class AAH_TXPlayer : public MediaPlayerHWInterface { + public: + AAH_TXPlayer(); + + virtual status_t initCheck(); + virtual status_t setDataSource(const char *url, + const KeyedVector<String8, String8>* + headers); + virtual status_t setDataSource(int fd, int64_t offset, int64_t length); + virtual status_t setVideoSurface(const sp<Surface>& surface); + virtual status_t setVideoSurfaceTexture(const sp<ISurfaceTexture>& + surfaceTexture); + virtual status_t prepare(); + virtual status_t prepareAsync(); + virtual status_t start(); + virtual status_t stop(); + virtual status_t pause(); + virtual bool isPlaying(); + virtual status_t seekTo(int msec); + virtual status_t getCurrentPosition(int *msec); + virtual status_t getDuration(int *msec); + virtual status_t reset(); + virtual status_t setLooping(int loop); + virtual player_type playerType(); + virtual status_t setParameter(int key, const Parcel &request); + virtual status_t getParameter(int key, Parcel *reply); + virtual status_t invoke(const Parcel& request, Parcel *reply); 
+ virtual status_t getMetadata(const media::Metadata::Filter& ids, + Parcel* records); + virtual status_t setVolume(float leftVolume, float rightVolume); + virtual status_t setAudioStreamType(audio_stream_type_t streamType); + virtual status_t setRetransmitEndpoint( + const struct sockaddr_in* endpoint); + + static const int64_t kAAHRetryKeepAroundTimeNs; + + protected: + virtual ~AAH_TXPlayer(); + + private: + friend struct AwesomeEvent; + + enum { + PLAYING = 1, + PREPARING = 8, + PREPARED = 16, + PREPARE_CANCELLED = 64, + CACHE_UNDERRUN = 128, + + // We are basically done preparing but are currently buffering + // sufficient data to begin playback and finish the preparation + // phase for good. + PREPARING_CONNECTED = 2048, + + INCOGNITO = 32768, + }; + + status_t setDataSource_l(const char *url, + const KeyedVector<String8, String8> *headers); + status_t setDataSource_l(const sp<MediaExtractor>& extractor); + status_t finishSetDataSource_l(); + status_t prepareAsync_l(); + void onPrepareAsyncEvent(); + void finishAsyncPrepare_l(); + void abortPrepare(status_t err); + status_t play_l(); + status_t pause_l(bool doClockUpdate = true); + status_t seekTo_l(int64_t timeUs); + void updateClockTransform_l(bool pause); + void sendEOS_l(); + void cancelPlayerEvents(bool keepBufferingGoing = false); + void reset_l(); + void notifyListener_l(int msg, int ext1 = 0, int ext2 = 0); + bool getBitrate_l(int64_t* bitrate); + status_t getDuration_l(int* msec); + bool getCachedDuration_l(int64_t* durationUs, bool* eos); + void ensureCacheIsFetching_l(); + void postBufferingEvent_l(); + void postPumpAudioEvent_l(int64_t delayUs); + void onBufferingUpdate(); + void onPumpAudio(); + void queuePacketToSender_l(const sp<TRTPPacket>& packet); + + Mutex mLock; + + TimedEventQueue mQueue; + bool mQueueStarted; + + sp<TimedEventQueue::Event> mBufferingEvent; + bool mBufferingEventPending; + + uint32_t mFlags; + uint32_t mExtractorFlags; + + String8 mUri; + KeyedVector<String8, String8> 
mUriHeaders; + + sp<DataSource> mFileSource; + + sp<TimedEventQueue::Event> mAsyncPrepareEvent; + Condition mPreparedCondition; + status_t mPrepareResult; + + bool mIsSeeking; + int64_t mSeekTimeUs; + + sp<TimedEventQueue::Event> mPumpAudioEvent; + bool mPumpAudioEventPending; + + sp<HTTPBase> mConnectingDataSource; + sp<NuCachedSource2> mCachedSource; + + sp<MediaSource> mAudioSource; + TRTPAudioPacket::TRTPAudioCodecType mAudioCodec; + sp<MetaData> mAudioFormat; + uint8_t* mAudioCodecData; + size_t mAudioCodecDataSize; + + int64_t mDurationUs; + int64_t mBitrate; + + sp<AAH_TXSender> mAAH_Sender; + LinearTransform mCurrentClockTransform; + bool mCurrentClockTransformValid; + int64_t mLastQueuedMediaTimePTS; + bool mLastQueuedMediaTimePTSValid; + bool mPlayRateIsPaused; + CCHelper mCCHelper; + + Mutex mEndpointLock; + AAH_TXSender::Endpoint mEndpoint; + bool mEndpointValid; + bool mEndpointRegistered; + uint16_t mProgramID; + uint8_t mTRTPVolume; + + DISALLOW_EVIL_CONSTRUCTORS(AAH_TXPlayer); +}; + +} // namespace android + +#endif // __AAH_TX_PLAYER_H__ diff --git a/media/libaah_rtp/aah_tx_sender.cpp b/media/libaah_rtp/aah_tx_sender.cpp new file mode 100644 index 0000000..08e32d2 --- /dev/null +++ b/media/libaah_rtp/aah_tx_sender.cpp @@ -0,0 +1,603 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define LOG_TAG "LibAAH_RTP" +#include <media/stagefright/foundation/ADebug.h> + +#include <netinet/in.h> +#include <poll.h> +#include <sys/types.h> +#include <sys/socket.h> +#include <unistd.h> + +#include <media/stagefright/foundation/AMessage.h> +#include <utils/misc.h> + +#include "aah_tx_player.h" +#include "aah_tx_sender.h" + +namespace android { + +const char* AAH_TXSender::kSendPacketIPAddr = "ipaddr"; +const char* AAH_TXSender::kSendPacketPort = "port"; +const char* AAH_TXSender::kSendPacketTRTPPacket = "trtp"; + +const int AAH_TXSender::kRetryTrimIntervalUs = 100000; +const int AAH_TXSender::kHeartbeatIntervalUs = 1000000; +const int AAH_TXSender::kRetryBufferCapacity = 100; +const nsecs_t AAH_TXSender::kHeartbeatTimeout = 600ull * 1000000000ull; + +Mutex AAH_TXSender::sLock; +wp<AAH_TXSender> AAH_TXSender::sInstance; +uint32_t AAH_TXSender::sNextEpoch; +bool AAH_TXSender::sNextEpochValid = false; + +AAH_TXSender::AAH_TXSender() : mSocket(-1) { + mLastSentPacketTime = systemTime(); +} + +sp<AAH_TXSender> AAH_TXSender::GetInstance() { + Mutex::Autolock autoLock(sLock); + + sp<AAH_TXSender> sender = sInstance.promote(); + + if (sender == NULL) { + sender = new AAH_TXSender(); + if (sender == NULL) { + return NULL; + } + + sender->mLooper = new ALooper(); + if (sender->mLooper == NULL) { + return NULL; + } + + sender->mReflector = new AHandlerReflector<AAH_TXSender>(sender.get()); + if (sender->mReflector == NULL) { + return NULL; + } + + sender->mSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP); + if (sender->mSocket == -1) { + ALOGW("%s unable to create socket", __PRETTY_FUNCTION__); + return NULL; + } + + struct sockaddr_in bind_addr; + memset(&bind_addr, 0, sizeof(bind_addr)); + bind_addr.sin_family = AF_INET; + if (bind(sender->mSocket, + reinterpret_cast<const sockaddr*>(&bind_addr), + sizeof(bind_addr)) < 0) { + ALOGW("%s unable to bind socket (errno %d)", + __PRETTY_FUNCTION__, errno); + return NULL; + } + + sender->mRetryReceiver = new 
RetryReceiver(sender.get()); + if (sender->mRetryReceiver == NULL) { + return NULL; + } + + sender->mLooper->setName("AAH_TXSender"); + sender->mLooper->registerHandler(sender->mReflector); + sender->mLooper->start(false, false, PRIORITY_AUDIO); + + if (sender->mRetryReceiver->run("AAH_TXSenderRetry", PRIORITY_AUDIO) + != OK) { + ALOGW("%s unable to start retry thread", __PRETTY_FUNCTION__); + return NULL; + } + + sInstance = sender; + } + + return sender; +} + +AAH_TXSender::~AAH_TXSender() { + mLooper->stop(); + mLooper->unregisterHandler(mReflector->id()); + + if (mRetryReceiver != NULL) { + mRetryReceiver->requestExit(); + mRetryReceiver->mWakeupEvent.setEvent(); + if (mRetryReceiver->requestExitAndWait() != OK) { + ALOGW("%s shutdown of retry receiver failed", __PRETTY_FUNCTION__); + } + mRetryReceiver->mSender = NULL; + mRetryReceiver.clear(); + } + + if (mSocket != -1) { + close(mSocket); + } +} + +// Return the next epoch number usable for a newly instantiated endpoint. +uint32_t AAH_TXSender::getNextEpoch() { + Mutex::Autolock autoLock(sLock); + + if (sNextEpochValid) { + sNextEpoch = (sNextEpoch + 1) & TRTPPacket::kTRTPEpochMask; + } else { + sNextEpoch = ns2ms(systemTime()) & TRTPPacket::kTRTPEpochMask; + sNextEpochValid = true; + } + + return sNextEpoch; +} + +// Notify the sender that a player has started sending to this endpoint. +// Returns a program ID for use by the calling player. +uint16_t AAH_TXSender::registerEndpoint(const Endpoint& endpoint) { + Mutex::Autolock lock(mEndpointLock); + + EndpointState* eps = mEndpointMap.valueFor(endpoint); + if (eps) { + eps->playerRefCount++; + } else { + eps = new EndpointState(getNextEpoch()); + mEndpointMap.add(endpoint, eps); + } + + // if this is the first registered endpoint, then send a message to start + // trimming retry buffers and a message to start sending heartbeats. 
+ if (mEndpointMap.size() == 1) { + sp<AMessage> trimMessage = new AMessage(kWhatTrimRetryBuffers, + handlerID()); + trimMessage->post(kRetryTrimIntervalUs); + + sp<AMessage> heartbeatMessage = new AMessage(kWhatSendHeartbeats, + handlerID()); + heartbeatMessage->post(kHeartbeatIntervalUs); + } + + eps->nextProgramID++; + return eps->nextProgramID; +} + +// Notify the sender that a player has ceased sending to this endpoint. +// An endpoint's state can not be deleted until all of the endpoint's +// registered players have called unregisterEndpoint. +void AAH_TXSender::unregisterEndpoint(const Endpoint& endpoint) { + Mutex::Autolock lock(mEndpointLock); + + EndpointState* eps = mEndpointMap.valueFor(endpoint); + if (eps) { + eps->playerRefCount--; + CHECK(eps->playerRefCount >= 0); + } +} + +void AAH_TXSender::onMessageReceived(const sp<AMessage>& msg) { + switch (msg->what()) { + case kWhatSendPacket: + onSendPacket(msg); + break; + + case kWhatTrimRetryBuffers: + trimRetryBuffers(); + break; + + case kWhatSendHeartbeats: + sendHeartbeats(); + break; + + default: + TRESPASS(); + break; + } +} + +void AAH_TXSender::onSendPacket(const sp<AMessage>& msg) { + sp<RefBase> obj; + CHECK(msg->findObject(kSendPacketTRTPPacket, &obj)); + sp<TRTPPacket> packet = static_cast<TRTPPacket*>(obj.get()); + + uint32_t ipAddr; + CHECK(msg->findInt32(kSendPacketIPAddr, + reinterpret_cast<int32_t*>(&ipAddr))); + + int32_t port32; + CHECK(msg->findInt32(kSendPacketPort, &port32)); + uint16_t port = port32; + + Mutex::Autolock lock(mEndpointLock); + doSendPacket_l(packet, Endpoint(ipAddr, port)); + mLastSentPacketTime = systemTime(); +} + +void AAH_TXSender::doSendPacket_l(const sp<TRTPPacket>& packet, + const Endpoint& endpoint) { + EndpointState* eps = mEndpointMap.valueFor(endpoint); + if (!eps) { + // the endpoint state has disappeared, so the player that sent this + // packet must be dead. 
+ return; + } + + // assign the packet's sequence number + packet->setEpoch(eps->epoch); + packet->setSeqNumber(eps->trtpSeqNumber++); + + // add the packet to the retry buffer + RetryBuffer& retry = eps->retry; + retry.push_back(packet); + + // send the packet + struct sockaddr_in addr; + memset(&addr, 0, sizeof(addr)); + addr.sin_family = AF_INET; + addr.sin_addr.s_addr = endpoint.addr; + addr.sin_port = endpoint.port; + + ssize_t result = sendto(mSocket, + packet->getPacket(), + packet->getPacketLen(), + 0, + (const struct sockaddr *) &addr, + sizeof(addr)); + if (result == -1) { + ALOGW("%s sendto failed", __PRETTY_FUNCTION__); + } +} + +void AAH_TXSender::trimRetryBuffers() { + Mutex::Autolock lock(mEndpointLock); + + nsecs_t localTimeNow = systemTime(); + + Vector<Endpoint> endpointsToRemove; + + for (size_t i = 0; i < mEndpointMap.size(); i++) { + EndpointState* eps = mEndpointMap.editValueAt(i); + RetryBuffer& retry = eps->retry; + + while (!retry.isEmpty()) { + if (retry[0]->getExpireTime() < localTimeNow) { + retry.pop_front(); + } else { + break; + } + } + + if (retry.isEmpty() && eps->playerRefCount == 0) { + endpointsToRemove.add(mEndpointMap.keyAt(i)); + } + } + + // remove the state for any endpoints that are no longer in use + for (size_t i = 0; i < endpointsToRemove.size(); i++) { + Endpoint& e = endpointsToRemove.editItemAt(i); + ALOGD("*** %s removing endpoint addr=%08x", + __PRETTY_FUNCTION__, e.addr); + size_t index = mEndpointMap.indexOfKey(e); + delete mEndpointMap.valueAt(index); + mEndpointMap.removeItemsAt(index); + } + + // schedule the next trim + if (mEndpointMap.size()) { + sp<AMessage> trimMessage = new AMessage(kWhatTrimRetryBuffers, + handlerID()); + trimMessage->post(kRetryTrimIntervalUs); + } +} + +void AAH_TXSender::sendHeartbeats() { + Mutex::Autolock lock(mEndpointLock); + + if (shouldSendHeartbeats_l()) { + for (size_t i = 0; i < mEndpointMap.size(); i++) { + EndpointState* eps = mEndpointMap.editValueAt(i); + const Endpoint& 
ep = mEndpointMap.keyAt(i); + + sp<TRTPControlPacket> packet = new TRTPControlPacket(); + packet->setCommandID(TRTPControlPacket::kCommandNop); + + packet->setExpireTime(systemTime() + + AAH_TXPlayer::kAAHRetryKeepAroundTimeNs); + packet->pack(); + + doSendPacket_l(packet, ep); + } + } + + // schedule the next heartbeat + if (mEndpointMap.size()) { + sp<AMessage> heartbeatMessage = new AMessage(kWhatSendHeartbeats, + handlerID()); + heartbeatMessage->post(kHeartbeatIntervalUs); + } +} + +bool AAH_TXSender::shouldSendHeartbeats_l() { + // assert(holding endpoint lock) + return (systemTime() < (mLastSentPacketTime + kHeartbeatTimeout)); +} + +// Receiver + +// initial 4-byte ID of a retry request packet +const uint32_t AAH_TXSender::RetryReceiver::kRetryRequestID = 'Treq'; + +// initial 4-byte ID of a retry NAK packet +const uint32_t AAH_TXSender::RetryReceiver::kRetryNakID = 'Tnak'; + +// initial 4-byte ID of a fast start request packet +const uint32_t AAH_TXSender::RetryReceiver::kFastStartRequestID = 'Tfst'; + +AAH_TXSender::RetryReceiver::RetryReceiver(AAH_TXSender* sender) + : Thread(false), + mSender(sender) {} + + AAH_TXSender::RetryReceiver::~RetryReceiver() { + mWakeupEvent.clearPendingEvents(); + } + +// Returns true if val is within the interval bounded inclusively by +// start and end. Also handles the case where there is a rollover of the +// range between start and end. 
+template <typename T> +static inline bool withinIntervalWithRollover(T val, T start, T end) { + return ((start <= end && val >= start && val <= end) || + (start > end && (val >= start || val <= end))); +} + +bool AAH_TXSender::RetryReceiver::threadLoop() { + struct pollfd pollFds[2]; + pollFds[0].fd = mSender->mSocket; + pollFds[0].events = POLLIN; + pollFds[0].revents = 0; + pollFds[1].fd = mWakeupEvent.getWakeupHandle(); + pollFds[1].events = POLLIN; + pollFds[1].revents = 0; + + int pollResult = poll(pollFds, NELEM(pollFds), -1); + if (pollResult == -1) { + ALOGE("%s poll failed", __PRETTY_FUNCTION__); + return false; + } + + if (exitPending()) { + ALOGI("*** %s exiting", __PRETTY_FUNCTION__); + return false; + } + + if (pollFds[0].revents) { + handleRetryRequest(); + } + + return true; +} + +void AAH_TXSender::RetryReceiver::handleRetryRequest() { + ALOGV("*** RX %s start", __PRETTY_FUNCTION__); + + RetryPacket request; + struct sockaddr requestSrcAddr; + socklen_t requestSrcAddrLen = sizeof(requestSrcAddr); + + ssize_t result = recvfrom(mSender->mSocket, &request, sizeof(request), 0, + &requestSrcAddr, &requestSrcAddrLen); + if (result == -1) { + ALOGE("%s recvfrom failed, errno=%d", __PRETTY_FUNCTION__, errno); + return; + } + + if (static_cast<size_t>(result) < sizeof(RetryPacket)) { + ALOGW("%s short packet received", __PRETTY_FUNCTION__); + return; + } + + uint32_t host_request_id = ntohl(request.id); + if ((host_request_id != kRetryRequestID) && + (host_request_id != kFastStartRequestID)) { + ALOGW("%s received retry request with bogus ID (%08x)", + __PRETTY_FUNCTION__, host_request_id); + return; + } + + Endpoint endpoint(request.endpointIP, request.endpointPort); + + Mutex::Autolock lock(mSender->mEndpointLock); + + EndpointState* eps = mSender->mEndpointMap.valueFor(endpoint); + + if (eps == NULL || eps->retry.isEmpty()) { + // we have no retry buffer or an empty retry buffer for this endpoint, + // so NAK the entire request + RetryPacket nak = 
request; + nak.id = htonl(kRetryNakID); + result = sendto(mSender->mSocket, &nak, sizeof(nak), 0, + &requestSrcAddr, requestSrcAddrLen); + if (result == -1) { + ALOGW("%s sendto failed", __PRETTY_FUNCTION__); + } + return; + } + + RetryBuffer& retry = eps->retry; + + uint16_t startSeq = ntohs(request.seqStart); + uint16_t endSeq = ntohs(request.seqEnd); + + uint16_t retryFirstSeq = retry[0]->getSeqNumber(); + uint16_t retryLastSeq = retry[retry.size() - 1]->getSeqNumber(); + + // If this is a fast start, then force the start of the retry to match the + // start of the retransmit ring buffer (unless the end of the retransmit + // ring buffer is already past the point of fast start) + if ((host_request_id == kFastStartRequestID) && + !((startSeq - retryFirstSeq) & 0x8000)) { + startSeq = retryFirstSeq; + } + + int startIndex; + if (withinIntervalWithRollover(startSeq, retryFirstSeq, retryLastSeq)) { + startIndex = static_cast<uint16_t>(startSeq - retryFirstSeq); + } else { + startIndex = -1; + } + + int endIndex; + if (withinIntervalWithRollover(endSeq, retryFirstSeq, retryLastSeq)) { + endIndex = static_cast<uint16_t>(endSeq - retryFirstSeq); + } else { + endIndex = -1; + } + + if (startIndex == -1 && endIndex == -1) { + // no part of the request range is found in the retry buffer + RetryPacket nak = request; + nak.id = htonl(kRetryNakID); + result = sendto(mSender->mSocket, &nak, sizeof(nak), 0, + &requestSrcAddr, requestSrcAddrLen); + if (result == -1) { + ALOGW("%s sendto failed", __PRETTY_FUNCTION__); + } + return; + } + + if (startIndex == -1) { + // NAK a subrange at the front of the request range + RetryPacket nak = request; + nak.id = htonl(kRetryNakID); + nak.seqEnd = htons(retryFirstSeq - 1); + result = sendto(mSender->mSocket, &nak, sizeof(nak), 0, + &requestSrcAddr, requestSrcAddrLen); + if (result == -1) { + ALOGW("%s sendto failed", __PRETTY_FUNCTION__); + } + + startIndex = 0; + } else if (endIndex == -1) { + // NAK a subrange at the back of the 
request range + RetryPacket nak = request; + nak.id = htonl(kRetryNakID); + nak.seqStart = htons(retryLastSeq + 1); + result = sendto(mSender->mSocket, &nak, sizeof(nak), 0, + &requestSrcAddr, requestSrcAddrLen); + if (result == -1) { + ALOGW("%s sendto failed", __PRETTY_FUNCTION__); + } + + endIndex = retry.size() - 1; + } + + // send the retry packets + for (int i = startIndex; i <= endIndex; i++) { + const sp<TRTPPacket>& replyPacket = retry[i]; + + result = sendto(mSender->mSocket, + replyPacket->getPacket(), + replyPacket->getPacketLen(), + 0, + &requestSrcAddr, + requestSrcAddrLen); + + if (result == -1) { + ALOGW("%s sendto failed", __PRETTY_FUNCTION__); + } + } +} + +// Endpoint + +AAH_TXSender::Endpoint::Endpoint() + : addr(0) + , port(0) { } + +AAH_TXSender::Endpoint::Endpoint(uint32_t a, uint16_t p) + : addr(a) + , port(p) {} + +bool AAH_TXSender::Endpoint::operator<(const Endpoint& other) const { + return ((addr < other.addr) || + (addr == other.addr && port < other.port)); +} + +// EndpointState + +AAH_TXSender::EndpointState::EndpointState(uint32_t _epoch) + : retry(kRetryBufferCapacity) + , playerRefCount(1) + , trtpSeqNumber(0) + , nextProgramID(0) + , epoch(_epoch) { } + +// CircularBuffer + +template <typename T> +CircularBuffer<T>::CircularBuffer(size_t capacity) + : mCapacity(capacity) + , mHead(0) + , mTail(0) + , mFillCount(0) { + mBuffer = new T[capacity]; +} + +template <typename T> +CircularBuffer<T>::~CircularBuffer() { + delete [] mBuffer; +} + +template <typename T> +void CircularBuffer<T>::push_back(const T& item) { + if (this->isFull()) { + this->pop_front(); + } + mBuffer[mHead] = item; + mHead = (mHead + 1) % mCapacity; + mFillCount++; +} + +template <typename T> +void CircularBuffer<T>::pop_front() { + CHECK(!isEmpty()); + mBuffer[mTail] = T(); + mTail = (mTail + 1) % mCapacity; + mFillCount--; +} + +template <typename T> +size_t CircularBuffer<T>::size() const { + return mFillCount; +} + +template <typename T> +bool 
CircularBuffer<T>::isFull() const { + return (mFillCount == mCapacity); +} + +template <typename T> +bool CircularBuffer<T>::isEmpty() const { + return (mFillCount == 0); +} + +template <typename T> +const T& CircularBuffer<T>::itemAt(size_t index) const { + CHECK(index < mFillCount); + return mBuffer[(mTail + index) % mCapacity]; +} + +template <typename T> +const T& CircularBuffer<T>::operator[](size_t index) const { + return itemAt(index); +} + +} // namespace android diff --git a/media/libaah_rtp/aah_tx_sender.h b/media/libaah_rtp/aah_tx_sender.h new file mode 100644 index 0000000..74206c4 --- /dev/null +++ b/media/libaah_rtp/aah_tx_sender.h @@ -0,0 +1,162 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __AAH_TX_SENDER_H__ +#define __AAH_TX_SENDER_H__ + +#include <media/stagefright/foundation/ALooper.h> +#include <media/stagefright/foundation/AHandlerReflector.h> +#include <utils/RefBase.h> +#include <utils/threads.h> + +#include "aah_tx_packet.h" +#include "pipe_event.h" + +namespace android { + +template <typename T> class CircularBuffer { + public: + CircularBuffer(size_t capacity); + ~CircularBuffer(); + void push_back(const T& item);; + void pop_front(); + size_t size() const; + bool isFull() const; + bool isEmpty() const; + const T& itemAt(size_t index) const; + const T& operator[](size_t index) const; + + private: + T* mBuffer; + size_t mCapacity; + size_t mHead; + size_t mTail; + size_t mFillCount; + + DISALLOW_EVIL_CONSTRUCTORS(CircularBuffer); +}; + +class AAH_TXSender : public virtual RefBase { + public: + ~AAH_TXSender(); + + static sp<AAH_TXSender> GetInstance(); + + ALooper::handler_id handlerID() { return mReflector->id(); } + + // an IP address and port + struct Endpoint { + Endpoint(); + Endpoint(uint32_t a, uint16_t p); + bool operator<(const Endpoint& other) const; + + uint32_t addr; + uint16_t port; + }; + + uint16_t registerEndpoint(const Endpoint& endpoint); + void unregisterEndpoint(const Endpoint& endpoint); + + enum { + kWhatSendPacket, + kWhatTrimRetryBuffers, + kWhatSendHeartbeats, + }; + + // fields for SendPacket messages + static const char* kSendPacketIPAddr; + static const char* kSendPacketPort; + static const char* kSendPacketTRTPPacket; + + private: + AAH_TXSender(); + + static Mutex sLock; + static wp<AAH_TXSender> sInstance; + static uint32_t sNextEpoch; + static bool sNextEpochValid; + + static uint32_t getNextEpoch(); + + typedef CircularBuffer<sp<TRTPPacket> > RetryBuffer; + + // state maintained on a per-endpoint basis + struct EndpointState { + EndpointState(uint32_t epoch); + RetryBuffer retry; + int playerRefCount; + uint16_t trtpSeqNumber; + uint16_t nextProgramID; + uint32_t epoch; + }; + + friend class 
AHandlerReflector<AAH_TXSender>; + void onMessageReceived(const sp<AMessage>& msg); + void onSendPacket(const sp<AMessage>& msg); + void doSendPacket_l(const sp<TRTPPacket>& packet, + const Endpoint& endpoint); + void trimRetryBuffers(); + void sendHeartbeats(); + bool shouldSendHeartbeats_l(); + + sp<ALooper> mLooper; + sp<AHandlerReflector<AAH_TXSender> > mReflector; + + int mSocket; + nsecs_t mLastSentPacketTime; + + DefaultKeyedVector<Endpoint, EndpointState*> mEndpointMap; + Mutex mEndpointLock; + + static const int kRetryTrimIntervalUs; + static const int kHeartbeatIntervalUs; + static const int kRetryBufferCapacity; + static const nsecs_t kHeartbeatTimeout; + + class RetryReceiver : public Thread { + private: + friend class AAH_TXSender; + + RetryReceiver(AAH_TXSender* sender); + virtual ~RetryReceiver(); + virtual bool threadLoop(); + void handleRetryRequest(); + + static const int kMaxReceiverPacketLen; + static const uint32_t kRetryRequestID; + static const uint32_t kFastStartRequestID; + static const uint32_t kRetryNakID; + + AAH_TXSender* mSender; + PipeEvent mWakeupEvent; + }; + + sp<RetryReceiver> mRetryReceiver; + + DISALLOW_EVIL_CONSTRUCTORS(AAH_TXSender); +}; + +struct RetryPacket { + uint32_t id; + uint32_t endpointIP; + uint16_t endpointPort; + uint16_t seqStart; + uint16_t seqEnd; +} __attribute__((packed)); + +} // namespace android + +#endif // __AAH_TX_SENDER_H__ diff --git a/media/libaah_rtp/pipe_event.cpp b/media/libaah_rtp/pipe_event.cpp new file mode 100644 index 0000000..b8e6960 --- /dev/null +++ b/media/libaah_rtp/pipe_event.cpp @@ -0,0 +1,86 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "LibAAH_RTP" +#include <utils/Log.h> + +#include <errno.h> +#include <fcntl.h> +#include <poll.h> +#include <unistd.h> + +#include "pipe_event.h" + +namespace android { + +PipeEvent::PipeEvent() { + pipe_[0] = -1; + pipe_[1] = -1; + + // Create the pipe. + if (pipe(pipe_) >= 0) { + // Set non-blocking mode on the read side of the pipe so we can + // easily drain it whenever we wakeup. + fcntl(pipe_[0], F_SETFL, O_NONBLOCK); + } else { + ALOGE("Failed to create pipe event %d %d %d", + pipe_[0], pipe_[1], errno); + pipe_[0] = -1; + pipe_[1] = -1; + } +} + +PipeEvent::~PipeEvent() { + if (pipe_[0] >= 0) { + close(pipe_[0]); + } + + if (pipe_[1] >= 0) { + close(pipe_[1]); + } +} + +void PipeEvent::clearPendingEvents() { + char drain_buffer[16]; + while (read(pipe_[0], drain_buffer, sizeof(drain_buffer)) > 0) { + // No body. 
+ } +} + +bool PipeEvent::wait(int timeout) { + struct pollfd wait_fd; + + wait_fd.fd = getWakeupHandle(); + wait_fd.events = POLLIN; + wait_fd.revents = 0; + + int res = poll(&wait_fd, 1, timeout); + + if (res < 0) { + ALOGE("Wait error in PipeEvent; sleeping to prevent overload!"); + usleep(1000); + } + + return (res > 0); +} + +void PipeEvent::setEvent() { + char foo = 'q'; + write(pipe_[1], &foo, 1); +} + +} // namespace android + diff --git a/media/libaah_rtp/pipe_event.h b/media/libaah_rtp/pipe_event.h new file mode 100644 index 0000000..e53b0fd --- /dev/null +++ b/media/libaah_rtp/pipe_event.h @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __PIPE_EVENT_H__ +#define __PIPE_EVENT_H__ + +#include <media/stagefright/foundation/ABase.h> + +namespace android { + +class PipeEvent { + public: + PipeEvent(); + ~PipeEvent(); + + bool initCheck() const { + return ((pipe_[0] >= 0) && (pipe_[1] >= 0)); + } + + int getWakeupHandle() const { return pipe_[0]; } + + // block until the event fires; returns true if the event fired and false if + // the wait timed out. Timeout is expressed in milliseconds; negative + // values mean wait forever. 
+ bool wait(int timeout = -1); + + void clearPendingEvents(); + void setEvent(); + + private: + int pipe_[2]; + + DISALLOW_EVIL_CONSTRUCTORS(PipeEvent); +}; + +} // namespace android + +#endif // __PIPE_EVENT_H__ diff --git a/media/libeffects/data/audio_effects.conf b/media/libeffects/data/audio_effects.conf index b8fa487..ce25bc8 100644 --- a/media/libeffects/data/audio_effects.conf +++ b/media/libeffects/data/audio_effects.conf @@ -50,11 +50,11 @@ effects { } volume { library bundle - uuid 119341a0-8469-11df-81f9- 0002a5d5c51b + uuid 119341a0-8469-11df-81f9-0002a5d5c51b } reverb_env_aux { library reverb - uuid 4a387fc0-8ab3-11df-8bad- 0002a5d5c51b + uuid 4a387fc0-8ab3-11df-8bad-0002a5d5c51b } reverb_env_ins { library reverb diff --git a/media/libeffects/downmix/Android.mk b/media/libeffects/downmix/Android.mk new file mode 100644 index 0000000..0348e1e --- /dev/null +++ b/media/libeffects/downmix/Android.mk @@ -0,0 +1,28 @@ +LOCAL_PATH:= $(call my-dir) + +# Multichannel downmix effect library +include $(CLEAR_VARS) + +LOCAL_SRC_FILES:= \ + EffectDownmix.c + +LOCAL_SHARED_LIBRARIES := \ + libcutils + +LOCAL_MODULE:= libdownmix + +LOCAL_MODULE_TAGS := optional + +LOCAL_MODULE_PATH := $(TARGET_OUT_SHARED_LIBRARIES)/soundfx + +ifeq ($(TARGET_OS)-$(TARGET_SIMULATOR),linux-true) +LOCAL_LDLIBS += -ldl +endif + +LOCAL_C_INCLUDES := \ + system/media/audio_effects/include \ + system/media/audio_utils/include + +LOCAL_PRELINK_MODULE := false + +include $(BUILD_SHARED_LIBRARY) diff --git a/media/libeffects/downmix/EffectDownmix.c b/media/libeffects/downmix/EffectDownmix.c new file mode 100644 index 0000000..a325172 --- /dev/null +++ b/media/libeffects/downmix/EffectDownmix.c @@ -0,0 +1,889 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define LOG_TAG "EffectDownmix" +#define LOG_NDEBUG 0 +#include <cutils/log.h> +#include <stdlib.h> +#include <string.h> +#include <stdbool.h> +#include "EffectDownmix.h" + +#define MINUS_3_DB_IN_Q19_12 2896 // -3dB = 0.707 * 2^12 = 2896 + +// effect_handle_t interface implementation for downmix effect +const struct effect_interface_s gDownmixInterface = { + Downmix_Process, + Downmix_Command, + Downmix_GetDescriptor, + NULL /* no process_reverse function, no reference stream needed */ +}; + +audio_effect_library_t AUDIO_EFFECT_LIBRARY_INFO_SYM = { + tag : AUDIO_EFFECT_LIBRARY_TAG, + version : EFFECT_LIBRARY_API_VERSION, + name : "Downmix Library", + implementor : "The Android Open Source Project", + query_num_effects : DownmixLib_QueryNumberEffects, + query_effect : DownmixLib_QueryEffect, + create_effect : DownmixLib_Create, + release_effect : DownmixLib_Release, + get_descriptor : DownmixLib_GetDescriptor, +}; + + +// AOSP insert downmix UUID: 93f04452-e4fe-41cc-91f9-e475b6d1d69f +static const effect_descriptor_t gDownmixDescriptor = { + EFFECT_UIID_DOWNMIX__, //type + {0x93f04452, 0xe4fe, 0x41cc, 0x91f9, {0xe4, 0x75, 0xb6, 0xd1, 0xd6, 0x9f}}, // uuid + EFFECT_CONTROL_API_VERSION, + EFFECT_FLAG_TYPE_INSERT | EFFECT_FLAG_INSERT_FIRST, + 0, //FIXME what value should be reported? // cpu load + 0, //FIXME what value should be reported? 
// memory usage + "Multichannel Downmix To Stereo", // human readable effect name + "The Android Open Source Project" // human readable effect implementor name +}; + +// gDescriptors contains pointers to all defined effect descriptor in this library +static const effect_descriptor_t * const gDescriptors[] = { + &gDownmixDescriptor +}; + +// number of effects in this library +const int kNbEffects = sizeof(gDescriptors) / sizeof(const effect_descriptor_t *); + + +/*---------------------------------------------------------------------------- + * Effect API implementation + *--------------------------------------------------------------------------*/ + +/*--- Effect Library Interface Implementation ---*/ + +int32_t DownmixLib_QueryNumberEffects(uint32_t *pNumEffects) { + ALOGV("DownmixLib_QueryNumberEffects()"); + *pNumEffects = kNbEffects; + return 0; +} + +int32_t DownmixLib_QueryEffect(uint32_t index, effect_descriptor_t *pDescriptor) { + ALOGV("DownmixLib_QueryEffect() index=%d", index); + if (pDescriptor == NULL) { + return -EINVAL; + } + if (index >= (uint32_t)kNbEffects) { + return -EINVAL; + } + memcpy(pDescriptor, gDescriptors[index], sizeof(effect_descriptor_t)); + return 0; +} + + +int32_t DownmixLib_Create(const effect_uuid_t *uuid, + int32_t sessionId, + int32_t ioId, + effect_handle_t *pHandle) { + int ret; + int i; + downmix_module_t *module; + const effect_descriptor_t *desc; + + ALOGV("DownmixLib_Create()"); + + if (pHandle == NULL || uuid == NULL) { + return -EINVAL; + } + + for (i = 0 ; i < kNbEffects ; i++) { + desc = gDescriptors[i]; + if (memcmp(uuid, &desc->uuid, sizeof(effect_uuid_t)) == 0) { + break; + } + } + + if (i == kNbEffects) { + return -ENOENT; + } + + module = malloc(sizeof(downmix_module_t)); + + module->itfe = &gDownmixInterface; + + module->context.state = DOWNMIX_STATE_UNINITIALIZED; + + ret = Downmix_Init(module); + if (ret < 0) { + ALOGW("DownmixLib_Create() init failed"); + free(module); + return ret; + } + + *pHandle = 
(effect_handle_t) module; + + ALOGV("DownmixLib_Create() %p , size %d", module, sizeof(downmix_module_t)); + + return 0; +} + + +int32_t DownmixLib_Release(effect_handle_t handle) { + downmix_module_t *pDwmModule = (downmix_module_t *)handle; + + ALOGV("DownmixLib_Release() %p", handle); + if (handle == NULL) { + return -EINVAL; + } + + pDwmModule->context.state = DOWNMIX_STATE_UNINITIALIZED; + + free(pDwmModule); + return 0; +} + + +int32_t DownmixLib_GetDescriptor(const effect_uuid_t *uuid, effect_descriptor_t *pDescriptor) { + ALOGV("DownmixLib_GetDescriptor()"); + int i; + + if (pDescriptor == NULL || uuid == NULL){ + ALOGE("DownmixLib_Create() called with NULL pointer"); + return -EINVAL; + } + ALOGV("DownmixLib_GetDescriptor() nb effects=%d", kNbEffects); + for (i = 0; i < kNbEffects; i++) { + ALOGV("DownmixLib_GetDescriptor() i=%d", i); + if (memcmp(uuid, &gDescriptors[i]->uuid, sizeof(effect_uuid_t)) == 0) { + memcpy(pDescriptor, gDescriptors[i], sizeof(effect_descriptor_t)); + ALOGV("EffectGetDescriptor - UUID matched downmix type %d, UUID = %x", + i, gDescriptors[i]->uuid.timeLow); + return 0; + } + } + + return -EINVAL; +} + + +/*--- Effect Control Interface Implementation ---*/ + +static int Downmix_Process(effect_handle_t self, + audio_buffer_t *inBuffer, audio_buffer_t *outBuffer) { + + downmix_object_t *pDownmixer; + int16_t *pSrc, *pDst; + downmix_module_t *pDwmModule = (downmix_module_t *)self; + + if (pDwmModule == NULL) { + return -EINVAL; + } + + if (inBuffer == NULL || inBuffer->raw == NULL || + outBuffer == NULL || outBuffer->raw == NULL || + inBuffer->frameCount != outBuffer->frameCount) { + return -EINVAL; + } + + pDownmixer = (downmix_object_t*) &pDwmModule->context; + + if (pDownmixer->state == DOWNMIX_STATE_UNINITIALIZED) { + ALOGE("Downmix_Process error: trying to use an uninitialized downmixer"); + return -EINVAL; + } else if (pDownmixer->state == DOWNMIX_STATE_INITIALIZED) { + ALOGE("Downmix_Process error: trying to use a 
non-configured downmixer"); + return -ENODATA; + } + + pSrc = inBuffer->s16; + pDst = outBuffer->s16; + size_t numFrames = outBuffer->frameCount; + + const bool accumulate = + (pDwmModule->config.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE); + + switch(pDownmixer->type) { + + case DOWNMIX_TYPE_STRIP: + if (accumulate) { + while (numFrames) { + pDst[0] = clamp16(pDst[0] + pSrc[0]); + pDst[1] = clamp16(pDst[1] + pSrc[1]); + pSrc += pDownmixer->input_channel_count; + pDst += 2; + numFrames--; + } + } else { + while (numFrames) { + pDst[0] = pSrc[0]; + pDst[1] = pSrc[1]; + pSrc += pDownmixer->input_channel_count; + pDst += 2; + numFrames--; + } + } + break; + + case DOWNMIX_TYPE_FOLD: + // optimize for the common formats + switch(pDwmModule->config.inputCfg.channels) { + case AUDIO_CHANNEL_OUT_QUAD: + Downmix_foldFromQuad(pSrc, pDst, numFrames, accumulate); + break; + case AUDIO_CHANNEL_OUT_SURROUND: + Downmix_foldFromSurround(pSrc, pDst, numFrames, accumulate); + break; + case AUDIO_CHANNEL_OUT_5POINT1: + Downmix_foldFrom5Point1(pSrc, pDst, numFrames, accumulate); + break; + case AUDIO_CHANNEL_OUT_7POINT1: + Downmix_foldFrom7Point1(pSrc, pDst, numFrames, accumulate); + break; + default: + // FIXME implement generic downmix + ALOGE("Multichannel configurations other than quad, 4.0, 5.1 and 7.1 are not supported"); + break; + } + break; + + default: + return -EINVAL; + } + + return 0; +} + + +static int Downmix_Command(effect_handle_t self, uint32_t cmdCode, uint32_t cmdSize, + void *pCmdData, uint32_t *replySize, void *pReplyData) { + + downmix_module_t *pDwmModule = (downmix_module_t *) self; + downmix_object_t *pDownmixer; + int retsize; + + if (pDwmModule == NULL || pDwmModule->context.state == DOWNMIX_STATE_UNINITIALIZED) { + return -EINVAL; + } + + pDownmixer = (downmix_object_t*) &pDwmModule->context; + + ALOGV("Downmix_Command command %d cmdSize %d",cmdCode, cmdSize); + + switch (cmdCode) { + case EFFECT_CMD_INIT: + if (pReplyData == NULL || 
*replySize != sizeof(int)) { + return -EINVAL; + } + *(int *) pReplyData = Downmix_Init(pDwmModule); + break; + + case EFFECT_CMD_SET_CONFIG: + if (pCmdData == NULL || cmdSize != sizeof(effect_config_t) + || pReplyData == NULL || *replySize != sizeof(int)) { + return -EINVAL; + } + *(int *) pReplyData = Downmix_Configure(pDwmModule, + (effect_config_t *)pCmdData, false); + break; + + case EFFECT_CMD_RESET: + Downmix_Reset(pDownmixer, false); + break; + + case EFFECT_CMD_GET_PARAM: + ALOGV("Downmix_Command EFFECT_CMD_GET_PARAM pCmdData %p, *replySize %d, pReplyData: %p", + pCmdData, *replySize, pReplyData); + if (pCmdData == NULL || cmdSize < (int)(sizeof(effect_param_t) + sizeof(int32_t)) || + pReplyData == NULL || + *replySize < (int) sizeof(effect_param_t) + 2 * sizeof(int32_t)) { + return -EINVAL; + } + effect_param_t *rep = (effect_param_t *) pReplyData; + memcpy(pReplyData, pCmdData, sizeof(effect_param_t) + sizeof(int32_t)); + ALOGV("Downmix_Command EFFECT_CMD_GET_PARAM param %d, replySize %d", + *(int32_t *)rep->data, rep->vsize); + rep->status = Downmix_getParameter(pDownmixer, *(int32_t *)rep->data, &rep->vsize, + rep->data + sizeof(int32_t)); + *replySize = sizeof(effect_param_t) + sizeof(int32_t) + rep->vsize; + break; + + case EFFECT_CMD_SET_PARAM: + ALOGV("Downmix_Command EFFECT_CMD_SET_PARAM cmdSize %d pCmdData %p, *replySize %d, " \ + "pReplyData %p", cmdSize, pCmdData, *replySize, pReplyData); + if (pCmdData == NULL || (cmdSize < (int)(sizeof(effect_param_t) + sizeof(int32_t))) + || pReplyData == NULL || *replySize != (int)sizeof(int32_t)) { + return -EINVAL; + } + effect_param_t *cmd = (effect_param_t *) pCmdData; + *(int *)pReplyData = Downmix_setParameter(pDownmixer, *(int32_t *)cmd->data, + cmd->vsize, cmd->data + sizeof(int32_t)); + break; + + case EFFECT_CMD_SET_PARAM_DEFERRED: + //FIXME implement + ALOGW("Downmix_Command command EFFECT_CMD_SET_PARAM_DEFERRED not supported, FIXME"); + break; + + case EFFECT_CMD_SET_PARAM_COMMIT: + //FIXME 
implement + ALOGW("Downmix_Command command EFFECT_CMD_SET_PARAM_COMMIT not supported, FIXME"); + break; + + case EFFECT_CMD_ENABLE: + if (pReplyData == NULL || *replySize != sizeof(int)) { + return -EINVAL; + } + if (pDownmixer->state != DOWNMIX_STATE_INITIALIZED) { + return -ENOSYS; + } + pDownmixer->state = DOWNMIX_STATE_ACTIVE; + ALOGV("EFFECT_CMD_ENABLE() OK"); + *(int *)pReplyData = 0; + break; + + case EFFECT_CMD_DISABLE: + if (pReplyData == NULL || *replySize != sizeof(int)) { + return -EINVAL; + } + if (pDownmixer->state != DOWNMIX_STATE_ACTIVE) { + return -ENOSYS; + } + pDownmixer->state = DOWNMIX_STATE_INITIALIZED; + ALOGV("EFFECT_CMD_DISABLE() OK"); + *(int *)pReplyData = 0; + break; + + case EFFECT_CMD_SET_DEVICE: + if (pCmdData == NULL || cmdSize != (int)sizeof(uint32_t)) { + return -EINVAL; + } + // FIXME change type if playing on headset vs speaker + ALOGV("Downmix_Command EFFECT_CMD_SET_DEVICE: 0x%08x", *(uint32_t *)pCmdData); + break; + + case EFFECT_CMD_SET_VOLUME: { + // audio output is always stereo => 2 channel volumes + if (pCmdData == NULL || cmdSize != (int)sizeof(uint32_t) * 2) { + return -EINVAL; + } + // FIXME change volume + ALOGW("Downmix_Command command EFFECT_CMD_SET_VOLUME not supported, FIXME"); + float left = (float)(*(uint32_t *)pCmdData) / (1 << 24); + float right = (float)(*((uint32_t *)pCmdData + 1)) / (1 << 24); + ALOGV("Downmix_Command EFFECT_CMD_SET_VOLUME: left %f, right %f ", left, right); + break; + } + + case EFFECT_CMD_SET_AUDIO_MODE: + if (pCmdData == NULL || cmdSize != (int)sizeof(uint32_t)) { + return -EINVAL; + } + ALOGV("Downmix_Command EFFECT_CMD_SET_AUDIO_MODE: %d", *(uint32_t *)pCmdData); + break; + + case EFFECT_CMD_SET_CONFIG_REVERSE: + case EFFECT_CMD_SET_INPUT_DEVICE: + // these commands are ignored by a downmix effect + break; + + default: + ALOGW("Downmix_Command invalid command %d",cmdCode); + return -EINVAL; + } + + return 0; +} + + +int Downmix_GetDescriptor(effect_handle_t self, effect_descriptor_t 
*pDescriptor) +{ + downmix_module_t *pDwnmxModule = (downmix_module_t *) self; + + if (pDwnmxModule == NULL || + pDwnmxModule->context.state == DOWNMIX_STATE_UNINITIALIZED) { + return -EINVAL; + } + + memcpy(pDescriptor, &gDownmixDescriptor, sizeof(effect_descriptor_t)); + + return 0; +} + + +/*---------------------------------------------------------------------------- + * Downmix internal functions + *--------------------------------------------------------------------------*/ + +/*---------------------------------------------------------------------------- + * Downmix_Init() + *---------------------------------------------------------------------------- + * Purpose: + * Initialize downmix context and apply default parameters + * + * Inputs: + * pDwmModule pointer to downmix effect module + * + * Outputs: + * + * Returns: + * 0 indicates success + * + * Side Effects: + * updates: + * pDwmModule->context.type + * pDwmModule->context.apply_volume_correction + * pDwmModule->config.inputCfg + * pDwmModule->config.outputCfg + * pDwmModule->config.inputCfg.samplingRate + * pDwmModule->config.outputCfg.samplingRate + * pDwmModule->context.state + * doesn't set: + * pDwmModule->itfe + * + *---------------------------------------------------------------------------- + */ + +int Downmix_Init(downmix_module_t *pDwmModule) { + + ALOGV("Downmix_Init module %p", pDwmModule); + int ret = 0; + + memset(&pDwmModule->context, 0, sizeof(downmix_object_t)); + + pDwmModule->config.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ; + pDwmModule->config.inputCfg.format = AUDIO_FORMAT_PCM_16_BIT; + pDwmModule->config.inputCfg.channels = AUDIO_CHANNEL_OUT_7POINT1; + pDwmModule->config.inputCfg.bufferProvider.getBuffer = NULL; + pDwmModule->config.inputCfg.bufferProvider.releaseBuffer = NULL; + pDwmModule->config.inputCfg.bufferProvider.cookie = NULL; + pDwmModule->config.inputCfg.mask = EFFECT_CONFIG_ALL; + + pDwmModule->config.inputCfg.samplingRate = 44100; + 
pDwmModule->config.outputCfg.samplingRate = pDwmModule->config.inputCfg.samplingRate; + + // set a default value for the access mode, but should be overwritten by caller + pDwmModule->config.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_ACCUMULATE; + pDwmModule->config.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT; + pDwmModule->config.outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO; + pDwmModule->config.outputCfg.bufferProvider.getBuffer = NULL; + pDwmModule->config.outputCfg.bufferProvider.releaseBuffer = NULL; + pDwmModule->config.outputCfg.bufferProvider.cookie = NULL; + pDwmModule->config.outputCfg.mask = EFFECT_CONFIG_ALL; + + ret = Downmix_Configure(pDwmModule, &pDwmModule->config, true); + if (ret != 0) { + ALOGV("Downmix_Init error %d on module %p", ret, pDwmModule); + } else { + pDwmModule->context.state = DOWNMIX_STATE_INITIALIZED; + } + + return ret; +} + + +/*---------------------------------------------------------------------------- + * Downmix_Configure() + *---------------------------------------------------------------------------- + * Purpose: + * Set input and output audio configuration. 
+ * + * Inputs: + * pDwmModule pointer to downmix effect module + * pConfig pointer to effect_config_t structure containing input + * and output audio parameters configuration + * init true if called from init function + * + * Outputs: + * + * Returns: + * 0 indicates success + * + * Side Effects: + * + *---------------------------------------------------------------------------- + */ + +int Downmix_Configure(downmix_module_t *pDwmModule, effect_config_t *pConfig, bool init) { + + downmix_object_t *pDownmixer = &pDwmModule->context; + + // Check configuration compatibility with build options, and effect capabilities + if (pConfig->inputCfg.samplingRate != pConfig->outputCfg.samplingRate + || pConfig->outputCfg.channels != DOWNMIX_OUTPUT_CHANNELS + || pConfig->inputCfg.format != AUDIO_FORMAT_PCM_16_BIT + || pConfig->outputCfg.format != AUDIO_FORMAT_PCM_16_BIT) { + ALOGE("Downmix_Configure error: invalid config"); + return -EINVAL; + } + + memcpy(&pDwmModule->config, pConfig, sizeof(effect_config_t)); + + if (init) { + pDownmixer->type = DOWNMIX_TYPE_FOLD; + pDownmixer->apply_volume_correction = false; + pDownmixer->input_channel_count = 8; // matches default input of AUDIO_CHANNEL_OUT_7POINT1 + } else { + // when configuring the effect, do not allow a blank channel mask + if (pConfig->inputCfg.channels == 0) { + ALOGE("Downmix_Configure error: input channel mask can't be 0"); + return -EINVAL; + } + pDownmixer->input_channel_count = popcount(pConfig->inputCfg.channels); + } + + Downmix_Reset(pDownmixer, init); + + return 0; +} + + +/*---------------------------------------------------------------------------- + * Downmix_Reset() + *---------------------------------------------------------------------------- + * Purpose: + * Reset internal states. 
+ * + * Inputs: + * pDownmixer pointer to downmix context + * init true if called from init function + * + * Outputs: +* + * Returns: + * 0 indicates success + * + * Side Effects: + * + *---------------------------------------------------------------------------- + */ + +int Downmix_Reset(downmix_object_t *pDownmixer, bool init) { + // nothing to do here + return 0; +} + + +/*---------------------------------------------------------------------------- + * Downmix_setParameter() + *---------------------------------------------------------------------------- + * Purpose: + * Set a Downmix parameter + * + * Inputs: + * pDownmixer handle to instance data + * param parameter + * pValue pointer to parameter value + * size value size + * + * Outputs: + * + * Returns: + * 0 indicates success + * + * Side Effects: + * + *---------------------------------------------------------------------------- + */ +int Downmix_setParameter(downmix_object_t *pDownmixer, int32_t param, size_t size, void *pValue) { + + int16_t value16; + ALOGV("Downmix_setParameter, context %p, param %d, value16 %d, value32 %d", + pDownmixer, param, *(int16_t *)pValue, *(int32_t *)pValue); + + switch (param) { + + case DOWNMIX_PARAM_TYPE: + if (size != sizeof(downmix_type_t)) { + ALOGE("Downmix_setParameter(DOWNMIX_PARAM_TYPE) invalid size %d, should be %d", + size, sizeof(downmix_type_t)); + return -EINVAL; + } + value16 = *(int16_t *)pValue; + ALOGV("set DOWNMIX_PARAM_TYPE, type %d", value16); + if (!((value16 > DOWNMIX_TYPE_INVALID) && (value16 < DOWNMIX_TYPE_LAST))) { + ALOGE("Downmix_setParameter invalid DOWNMIX_PARAM_TYPE value %d", value16); + return -EINVAL; + } else { + pDownmixer->type = (downmix_type_t) value16; + break; + + default: + ALOGE("Downmix_setParameter unknown parameter %d", param); + return -EINVAL; + } +} + + return 0; +} /* end Downmix_setParameter */ + + +/*---------------------------------------------------------------------------- + * Downmix_getParameter() + 
*---------------------------------------------------------------------------- + * Purpose: + * Get a Downmix parameter + * + * Inputs: + * pDownmixer handle to instance data + * param parameter + * pValue pointer to variable to hold retrieved value + * pSize pointer to value size: maximum size as input + * + * Outputs: + * *pValue updated with parameter value + * *pSize updated with actual value size + * + * Returns: + * 0 indicates success + * + * Side Effects: + * + *---------------------------------------------------------------------------- + */ +int Downmix_getParameter(downmix_object_t *pDownmixer, int32_t param, size_t *pSize, void *pValue) { + int16_t *pValue16; + + switch (param) { + + case DOWNMIX_PARAM_TYPE: + if (*pSize < sizeof(int16_t)) { + ALOGE("Downmix_getParameter invalid parameter size %d for DOWNMIX_PARAM_TYPE", *pSize); + return -EINVAL; + } + pValue16 = (int16_t *)pValue; + *pValue16 = (int16_t) pDownmixer->type; + *pSize = sizeof(int16_t); + ALOGV("Downmix_getParameter DOWNMIX_PARAM_TYPE is %d", *pValue16); + break; + + default: + ALOGE("Downmix_getParameter unknown parameter %d", param); + return -EINVAL; + } + + return 0; +} /* end Downmix_getParameter */ + + +/*---------------------------------------------------------------------------- + * Downmix_foldFromQuad() + *---------------------------------------------------------------------------- + * Purpose: + * downmix a quad signal to stereo + * + * Inputs: + * pSrc quad audio samples to downmix + * numFrames the number of quad frames to downmix + * + * Outputs: + * pDst downmixed stereo audio samples + * + *---------------------------------------------------------------------------- + */ +void Downmix_foldFromQuad(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate) { + // sample at index 0 is FL + // sample at index 1 is FR + // sample at index 2 is RL + // sample at index 3 is RR + if (accumulate) { + while (numFrames) { + // FL + RL + pDst[0] = clamp16(pDst[0] + pSrc[0] + 
pSrc[2]); + // FR + RR + pDst[1] = clamp16(pDst[1] + pSrc[1] + pSrc[3]); + pSrc += 4; + pDst += 2; + numFrames--; + } + } else { // same code as above but without adding and clamping pDst[i] to itself + while (numFrames) { + // FL + RL + pDst[0] = clamp16(pSrc[0] + pSrc[2]); + // FR + RR + pDst[1] = clamp16(pSrc[1] + pSrc[3]); + pSrc += 4; + pDst += 2; + numFrames--; + } + } +} + + +/*---------------------------------------------------------------------------- + * Downmix_foldFromSurround() + *---------------------------------------------------------------------------- + * Purpose: + * downmix a "surround sound" (mono rear) signal to stereo + * + * Inputs: + * pSrc surround signal to downmix + * numFrames the number of surround frames to downmix + * + * Outputs: + * pDst downmixed stereo audio samples + * + *---------------------------------------------------------------------------- + */ +void Downmix_foldFromSurround(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate) { + int32_t lt, rt, centerPlusRearContrib; // samples in Q19.12 format + // sample at index 0 is FL + // sample at index 1 is FR + // sample at index 2 is FC + // sample at index 3 is RC + if (accumulate) { + while (numFrames) { + // centerPlusRearContrib = FC(-3dB) + RC(-3dB) + centerPlusRearContrib = (pSrc[2] * MINUS_3_DB_IN_Q19_12) + (pSrc[3] * MINUS_3_DB_IN_Q19_12); + // FL + centerPlusRearContrib + lt = (pSrc[0] << 12) + centerPlusRearContrib; + // FR + centerPlusRearContrib + rt = (pSrc[1] << 12) + centerPlusRearContrib; + pDst[0] = clamp16(pDst[0] + (lt >> 12)); + pDst[1] = clamp16(pDst[1] + (rt >> 12)); + pSrc += 4; + pDst += 2; + numFrames--; + } + } else { // same code as above but without adding and clamping pDst[i] to itself + while (numFrames) { + // centerPlusRearContrib = FC(-3dB) + RC(-3dB) + centerPlusRearContrib = (pSrc[2] * MINUS_3_DB_IN_Q19_12) + (pSrc[3] * MINUS_3_DB_IN_Q19_12); + // FL + centerPlusRearContrib + lt = (pSrc[0] << 12) + centerPlusRearContrib; + // FR + 
centerPlusRearContrib + rt = (pSrc[1] << 12) + centerPlusRearContrib; + pDst[0] = clamp16(lt >> 12); + pDst[1] = clamp16(rt >> 12); + pSrc += 4; + pDst += 2; + numFrames--; + } + } +} + + +/*---------------------------------------------------------------------------- + * Downmix_foldFrom5Point1() + *---------------------------------------------------------------------------- + * Purpose: + * downmix a 5.1 signal to stereo + * + * Inputs: + * pSrc 5.1 audio samples to downmix + * numFrames the number of 5.1 frames to downmix + * + * Outputs: + * pDst downmixed stereo audio samples + * + *---------------------------------------------------------------------------- + */ +void Downmix_foldFrom5Point1(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate) { + int32_t lt, rt, centerPlusLfeContrib; // samples in Q19.12 format + // sample at index 0 is FL + // sample at index 1 is FR + // sample at index 2 is FC + // sample at index 3 is LFE + // sample at index 4 is RL + // sample at index 5 is RR + if (accumulate) { + while (numFrames) { + // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB) + centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_Q19_12) + + (pSrc[3] * MINUS_3_DB_IN_Q19_12); + // FL + centerPlusLfeContrib + RL + lt = (pSrc[0] << 12) + centerPlusLfeContrib + (pSrc[4] << 12); + // FR + centerPlusLfeContrib + RR + rt = (pSrc[1] << 12) + centerPlusLfeContrib + (pSrc[5] << 12); + pDst[0] = clamp16(pDst[0] + (lt >> 12)); + pDst[1] = clamp16(pDst[1] + (rt >> 12)); + pSrc += 6; + pDst += 2; + numFrames--; + } + } else { // same code as above but without adding and clamping pDst[i] to itself + while (numFrames) { + // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB) + centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_Q19_12) + + (pSrc[3] * MINUS_3_DB_IN_Q19_12); + // FL + centerPlusLfeContrib + RL + lt = (pSrc[0] << 12) + centerPlusLfeContrib + (pSrc[4] << 12); + // FR + centerPlusLfeContrib + RR + rt = (pSrc[1] << 12) + centerPlusLfeContrib + (pSrc[5] << 12); + pDst[0] = 
clamp16(lt >> 12); + pDst[1] = clamp16(rt >> 12); + pSrc += 6; + pDst += 2; + numFrames--; + } + } +} + + +/*---------------------------------------------------------------------------- + * Downmix_foldFrom7Point1() + *---------------------------------------------------------------------------- + * Purpose: + * downmix a 7.1 signal to stereo + * + * Inputs: + * pSrc 7.1 audio samples to downmix + * numFrames the number of 7.1 frames to downmix + * + * Outputs: + * pDst downmixed stereo audio samples + * + *---------------------------------------------------------------------------- + */ +void Downmix_foldFrom7Point1(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate) { + int32_t lt, rt, centerPlusLfeContrib; // samples in Q19.12 format + // sample at index 0 is FL + // sample at index 1 is FR + // sample at index 2 is FC + // sample at index 3 is LFE + // sample at index 4 is RL + // sample at index 5 is RR + // sample at index 6 is SL + // sample at index 7 is SR + if (accumulate) { + while (numFrames) { + // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB) + centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_Q19_12) + + (pSrc[3] * MINUS_3_DB_IN_Q19_12); + // FL + centerPlusLfeContrib + SL + RL + lt = (pSrc[0] << 12) + centerPlusLfeContrib + (pSrc[6] << 12) + (pSrc[4] << 12); + // FR + centerPlusLfeContrib + SR + RR + rt = (pSrc[1] << 12) + centerPlusLfeContrib + (pSrc[7] << 12) + (pSrc[5] << 12); + pDst[0] = clamp16(lt >> 12); + pDst[1] = clamp16(rt >> 12); + pSrc += 8; + pDst += 2; + numFrames--; + } + } else { // same code as above but without adding and clamping pDst[i] to itself + while (numFrames) { + // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB) + centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_Q19_12) + + (pSrc[3] * MINUS_3_DB_IN_Q19_12); + // FL + centerPlusLfeContrib + SL + RL + lt = (pSrc[0] << 12) + centerPlusLfeContrib + (pSrc[6] << 12) + (pSrc[4] << 12); + // FR + centerPlusLfeContrib + SR + RR + rt = (pSrc[1] << 12) + centerPlusLfeContrib + 
(pSrc[7] << 12) + (pSrc[5] << 12); + pDst[0] = clamp16(pDst[0] + (lt >> 12)); + pDst[1] = clamp16(pDst[1] + (rt >> 12)); + pSrc += 8; + pDst += 2; + numFrames--; + } + } +} + diff --git a/media/libeffects/downmix/EffectDownmix.h b/media/libeffects/downmix/EffectDownmix.h new file mode 100644 index 0000000..4176b5a --- /dev/null +++ b/media/libeffects/downmix/EffectDownmix.h @@ -0,0 +1,96 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_EFFECTDOWNMIX_H_ +#define ANDROID_EFFECTDOWNMIX_H_ + +#include <audio_effects/effect_downmix.h> +#include <audio_utils/primitives.h> +#include <system/audio.h> + +/*------------------------------------ + * definitions + *------------------------------------ +*/ + +#define DOWNMIX_OUTPUT_CHANNELS AUDIO_CHANNEL_OUT_STEREO + +typedef enum { + DOWNMIX_STATE_UNINITIALIZED, + DOWNMIX_STATE_INITIALIZED, + DOWNMIX_STATE_ACTIVE, +} downmix_state_t; + +/* parameters for each downmixer */ +typedef struct { + downmix_state_t state; + downmix_type_t type; + bool apply_volume_correction; + uint8_t input_channel_count; +} downmix_object_t; + + +typedef struct downmix_module_s { + const struct effect_interface_s *itfe; + effect_config_t config; + downmix_object_t context; +} downmix_module_t; + + +/*------------------------------------ + * Effect API + *------------------------------------ +*/ +int32_t DownmixLib_QueryNumberEffects(uint32_t *pNumEffects); +int32_t DownmixLib_QueryEffect(uint32_t index, + effect_descriptor_t *pDescriptor); +int32_t DownmixLib_Create(const effect_uuid_t *uuid, + int32_t sessionId, + int32_t ioId, + effect_handle_t *pHandle); +int32_t DownmixLib_Release(effect_handle_t handle); +int32_t DownmixLib_GetDescriptor(const effect_uuid_t *uuid, + effect_descriptor_t *pDescriptor); + +static int Downmix_Process(effect_handle_t self, + audio_buffer_t *inBuffer, + audio_buffer_t *outBuffer); +static int Downmix_Command(effect_handle_t self, + uint32_t cmdCode, + uint32_t cmdSize, + void *pCmdData, + uint32_t *replySize, + void *pReplyData); +static int Downmix_GetDescriptor(effect_handle_t self, + effect_descriptor_t *pDescriptor); + + +/*------------------------------------ + * internal functions + *------------------------------------ +*/ +int Downmix_Init(downmix_module_t *pDwmModule); +int Downmix_Configure(downmix_module_t *pDwmModule, effect_config_t *pConfig, bool init); +int Downmix_Reset(downmix_object_t *pDownmixer, bool 
init); +int Downmix_setParameter(downmix_object_t *pDownmixer, int32_t param, size_t size, void *pValue); +int Downmix_getParameter(downmix_object_t *pDownmixer, int32_t param, size_t *pSize, void *pValue); + +void Downmix_foldFromQuad(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate); +void Downmix_foldFromSurround(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate); +void Downmix_foldFrom5Point1(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate); +void Downmix_foldFrom7Point1(int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate); + +#endif /*ANDROID_EFFECTDOWNMIX_H_*/ diff --git a/media/libeffects/factory/EffectsFactory.c b/media/libeffects/factory/EffectsFactory.c index 9f6599f..59cd9e3 100644 --- a/media/libeffects/factory/EffectsFactory.c +++ b/media/libeffects/factory/EffectsFactory.c @@ -53,8 +53,8 @@ static int loadEffect(cnode *node); static lib_entry_t *getLibrary(const char *path); static void resetEffectEnumeration(); static uint32_t updateNumEffects(); -static int findEffect(effect_uuid_t *type, - effect_uuid_t *uuid, +static int findEffect(const effect_uuid_t *type, + const effect_uuid_t *uuid, lib_entry_t **lib, effect_descriptor_t **desc); static void dumpEffectDescriptor(effect_descriptor_t *desc, char *str, size_t len); @@ -236,7 +236,7 @@ int EffectQueryEffect(uint32_t index, effect_descriptor_t *pDescriptor) return ret; } -int EffectGetDescriptor(effect_uuid_t *uuid, effect_descriptor_t *pDescriptor) +int EffectGetDescriptor(const effect_uuid_t *uuid, effect_descriptor_t *pDescriptor) { lib_entry_t *l = NULL; effect_descriptor_t *d = NULL; @@ -257,7 +257,7 @@ int EffectGetDescriptor(effect_uuid_t *uuid, effect_descriptor_t *pDescriptor) return ret; } -int EffectCreate(effect_uuid_t *uuid, int32_t sessionId, int32_t ioId, effect_handle_t *pHandle) +int EffectCreate(const effect_uuid_t *uuid, int32_t sessionId, int32_t ioId, effect_handle_t *pHandle) { list_elem_t *e = gLibraryList; lib_entry_t *l = 
NULL; @@ -372,7 +372,7 @@ exit: return ret; } -int EffectIsNullUuid(effect_uuid_t *uuid) +int EffectIsNullUuid(const effect_uuid_t *uuid) { if (memcmp(uuid, EFFECT_UUID_NULL, sizeof(effect_uuid_t))) { return 0; @@ -628,8 +628,8 @@ uint32_t updateNumEffects() { return cnt; } -int findEffect(effect_uuid_t *type, - effect_uuid_t *uuid, +int findEffect(const effect_uuid_t *type, + const effect_uuid_t *uuid, lib_entry_t **lib, effect_descriptor_t **desc) { diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp index 62be78c..3714283 100644 --- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp +++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp @@ -133,7 +133,8 @@ int LvmBundle_init (EffectContext *pContext); int LvmEffect_enable (EffectContext *pContext); int LvmEffect_disable (EffectContext *pContext); void LvmEffect_free (EffectContext *pContext); -int Effect_configure (EffectContext *pContext, effect_config_t *pConfig); +int Effect_setConfig (EffectContext *pContext, effect_config_t *pConfig); +void Effect_getConfig (EffectContext *pContext, effect_config_t *pConfig); int BassBoost_setParameter (EffectContext *pContext, void *pParam, void *pValue); int BassBoost_getParameter (EffectContext *pContext, void *pParam, @@ -194,7 +195,7 @@ extern "C" int EffectQueryEffect(uint32_t index, effect_descriptor_t *pDescripto return 0; } /* end EffectQueryEffect */ -extern "C" int EffectCreate(effect_uuid_t *uuid, +extern "C" int EffectCreate(const effect_uuid_t *uuid, int32_t sessionId, int32_t ioId, effect_handle_t *pHandle){ @@ -470,7 +471,7 @@ extern "C" int EffectRelease(effect_handle_t handle){ } /* end EffectRelease */ -extern "C" int EffectGetDescriptor(effect_uuid_t *uuid, +extern "C" int EffectGetDescriptor(const effect_uuid_t *uuid, effect_descriptor_t *pDescriptor) { const effect_descriptor_t *desc = NULL; @@ -936,7 +937,7 @@ void LvmEffect_free(EffectContext *pContext){ } /* end 
LvmEffect_free */ //---------------------------------------------------------------------------- -// Effect_configure() +// Effect_setConfig() //---------------------------------------------------------------------------- // Purpose: Set input and output audio configuration. // @@ -949,9 +950,9 @@ void LvmEffect_free(EffectContext *pContext){ // //---------------------------------------------------------------------------- -int Effect_configure(EffectContext *pContext, effect_config_t *pConfig){ +int Effect_setConfig(EffectContext *pContext, effect_config_t *pConfig){ LVM_Fs_en SampleRate; - //ALOGV("\tEffect_configure start"); + //ALOGV("\tEffect_setConfig start"); CHECK_ARG(pContext != NULL); CHECK_ARG(pConfig != NULL); @@ -992,7 +993,7 @@ int Effect_configure(EffectContext *pContext, effect_config_t *pConfig){ pContext->pBundledContext->SamplesPerSecond = 48000*2; // 2 secs Stereo break; default: - ALOGV("\tEffect_Configure invalid sampling rate %d", pConfig->inputCfg.samplingRate); + ALOGV("\tEffect_setConfig invalid sampling rate %d", pConfig->inputCfg.samplingRate); return -EINVAL; } @@ -1001,28 +1002,47 @@ int Effect_configure(EffectContext *pContext, effect_config_t *pConfig){ LVM_ControlParams_t ActiveParams; LVM_ReturnStatus_en LvmStatus = LVM_SUCCESS; - ALOGV("\tEffect_configure change sampling rate to %d", SampleRate); + ALOGV("\tEffect_setConfig change sampling rate to %d", SampleRate); /* Get the current settings */ LvmStatus = LVM_GetControlParameters(pContext->pBundledContext->hInstance, &ActiveParams); - LVM_ERROR_CHECK(LvmStatus, "LVM_GetControlParameters", "Effect_configure") + LVM_ERROR_CHECK(LvmStatus, "LVM_GetControlParameters", "Effect_setConfig") if(LvmStatus != LVM_SUCCESS) return -EINVAL; LvmStatus = LVM_SetControlParameters(pContext->pBundledContext->hInstance, &ActiveParams); - LVM_ERROR_CHECK(LvmStatus, "LVM_SetControlParameters", "Effect_configure") - ALOGV("\tEffect_configure Succesfully called LVM_SetControlParameters\n"); + 
LVM_ERROR_CHECK(LvmStatus, "LVM_SetControlParameters", "Effect_setConfig") + ALOGV("\tEffect_setConfig Succesfully called LVM_SetControlParameters\n"); pContext->pBundledContext->SampleRate = SampleRate; }else{ - //ALOGV("\tEffect_configure keep sampling rate at %d", SampleRate); + //ALOGV("\tEffect_setConfig keep sampling rate at %d", SampleRate); } - //ALOGV("\tEffect_configure End...."); + //ALOGV("\tEffect_setConfig End...."); return 0; -} /* end Effect_configure */ +} /* end Effect_setConfig */ + +//---------------------------------------------------------------------------- +// Effect_getConfig() +//---------------------------------------------------------------------------- +// Purpose: Get input and output audio configuration. +// +// Inputs: +// pContext: effect engine context +// pConfig: pointer to effect_config_t structure holding input and output +// configuration parameters +// +// Outputs: +// +//---------------------------------------------------------------------------- + +void Effect_getConfig(EffectContext *pContext, effect_config_t *pConfig) +{ + memcpy(pConfig, &pContext->config, sizeof(effect_config_t)); +} /* end Effect_getConfig */ //---------------------------------------------------------------------------- // BassGetStrength() @@ -2778,23 +2798,34 @@ int Effect_command(effect_handle_t self, } break; - case EFFECT_CMD_CONFIGURE: - //ALOGV("\tEffect_command cmdCode Case: EFFECT_CMD_CONFIGURE start"); + case EFFECT_CMD_SET_CONFIG: + //ALOGV("\tEffect_command cmdCode Case: EFFECT_CMD_SET_CONFIG start"); if (pCmdData == NULL|| cmdSize != sizeof(effect_config_t)|| pReplyData == NULL|| *replySize != sizeof(int)){ ALOGV("\tLVM_ERROR : Effect_command cmdCode Case: " - "EFFECT_CMD_CONFIGURE: ERROR"); + "EFFECT_CMD_SET_CONFIG: ERROR"); return -EINVAL; } - *(int *) pReplyData = android::Effect_configure(pContext, (effect_config_t *) pCmdData); - //ALOGV("\tEffect_command cmdCode Case: EFFECT_CMD_CONFIGURE end"); + *(int *) pReplyData = 
android::Effect_setConfig(pContext, (effect_config_t *) pCmdData); + //ALOGV("\tEffect_command cmdCode Case: EFFECT_CMD_SET_CONFIG end"); + break; + + case EFFECT_CMD_GET_CONFIG: + if (pReplyData == NULL || + *replySize != sizeof(effect_config_t)) { + ALOGV("\tLVM_ERROR : Effect_command cmdCode Case: " + "EFFECT_CMD_GET_CONFIG: ERROR"); + return -EINVAL; + } + + android::Effect_getConfig(pContext, (effect_config_t *)pReplyData); break; case EFFECT_CMD_RESET: //ALOGV("\tEffect_command cmdCode Case: EFFECT_CMD_RESET start"); - android::Effect_configure(pContext, &pContext->config); + android::Effect_setConfig(pContext, &pContext->config); //ALOGV("\tEffect_command cmdCode Case: EFFECT_CMD_RESET end"); break; @@ -3078,20 +3109,20 @@ int Effect_command(effect_handle_t self, if (pContext->pBundledContext->bBassEnabled == LVM_TRUE) { ALOGV("\tEFFECT_CMD_SET_DEVICE disable LVM_BASS_BOOST %d", - *(int32_t *)pCmdData); + *(int32_t *)pCmdData); android::LvmEffect_disable(pContext); } pContext->pBundledContext->bBassTempDisabled = LVM_TRUE; } else { ALOGV("\tEFFECT_CMD_SET_DEVICE device is valid for LVM_BASS_BOOST %d", - *(int32_t *)pCmdData); + *(int32_t *)pCmdData); // If a device supports bassboost and the effect has been temporarily disabled // previously then re-enable it if (pContext->pBundledContext->bBassEnabled == LVM_TRUE) { ALOGV("\tEFFECT_CMD_SET_DEVICE re-enable LVM_BASS_BOOST %d", - *(int32_t *)pCmdData); + *(int32_t *)pCmdData); android::LvmEffect_enable(pContext); } pContext->pBundledContext->bBassTempDisabled = LVM_FALSE; diff --git a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp index 1825aab..358357e 100755 --- a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp +++ b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp @@ -175,7 +175,8 @@ enum { //--- local function prototypes int Reverb_init (ReverbContext *pContext); void Reverb_free (ReverbContext *pContext); -int Reverb_configure 
(ReverbContext *pContext, effect_config_t *pConfig); +int Reverb_setConfig (ReverbContext *pContext, effect_config_t *pConfig); +void Reverb_getConfig (ReverbContext *pContext, effect_config_t *pConfig); int Reverb_setParameter (ReverbContext *pContext, void *pParam, void *pValue); int Reverb_getParameter (ReverbContext *pContext, void *pParam, @@ -209,7 +210,7 @@ extern "C" int EffectQueryEffect(uint32_t index, return 0; } /* end EffectQueryEffect */ -extern "C" int EffectCreate(effect_uuid_t *uuid, +extern "C" int EffectCreate(const effect_uuid_t *uuid, int32_t sessionId, int32_t ioId, effect_handle_t *pHandle){ @@ -316,7 +317,7 @@ extern "C" int EffectRelease(effect_handle_t handle){ return 0; } /* end EffectRelease */ -extern "C" int EffectGetDescriptor(effect_uuid_t *uuid, +extern "C" int EffectGetDescriptor(const effect_uuid_t *uuid, effect_descriptor_t *pDescriptor) { int i; int length = sizeof(gDescriptors) / sizeof(const effect_descriptor_t *); @@ -609,7 +610,7 @@ void Reverb_free(ReverbContext *pContext){ } /* end Reverb_free */ //---------------------------------------------------------------------------- -// Reverb_configure() +// Reverb_setConfig() //---------------------------------------------------------------------------- // Purpose: Set input and output audio configuration. 
// @@ -622,9 +623,9 @@ void Reverb_free(ReverbContext *pContext){ // //---------------------------------------------------------------------------- -int Reverb_configure(ReverbContext *pContext, effect_config_t *pConfig){ +int Reverb_setConfig(ReverbContext *pContext, effect_config_t *pConfig){ LVM_Fs_en SampleRate; - //ALOGV("\tReverb_configure start"); + //ALOGV("\tReverb_setConfig start"); CHECK_ARG(pContext != NULL); CHECK_ARG(pConfig != NULL); @@ -642,7 +643,7 @@ int Reverb_configure(ReverbContext *pContext, effect_config_t *pConfig){ return -EINVAL; } - //ALOGV("\tReverb_configure calling memcpy"); + //ALOGV("\tReverb_setConfig calling memcpy"); memcpy(&pContext->config, pConfig, sizeof(effect_config_t)); @@ -666,7 +667,7 @@ int Reverb_configure(ReverbContext *pContext, effect_config_t *pConfig){ SampleRate = LVM_FS_48000; break; default: - ALOGV("\rReverb_Configure invalid sampling rate %d", pConfig->inputCfg.samplingRate); + ALOGV("\rReverb_setConfig invalid sampling rate %d", pConfig->inputCfg.samplingRate); return -EINVAL; } @@ -675,28 +676,46 @@ int Reverb_configure(ReverbContext *pContext, effect_config_t *pConfig){ LVREV_ControlParams_st ActiveParams; LVREV_ReturnStatus_en LvmStatus = LVREV_SUCCESS; - //ALOGV("\tReverb_configure change sampling rate to %d", SampleRate); + //ALOGV("\tReverb_setConfig change sampling rate to %d", SampleRate); /* Get the current settings */ LvmStatus = LVREV_GetControlParameters(pContext->hInstance, &ActiveParams); - LVM_ERROR_CHECK(LvmStatus, "LVREV_GetControlParameters", "Reverb_configure") + LVM_ERROR_CHECK(LvmStatus, "LVREV_GetControlParameters", "Reverb_setConfig") if(LvmStatus != LVREV_SUCCESS) return -EINVAL; LvmStatus = LVREV_SetControlParameters(pContext->hInstance, &ActiveParams); - LVM_ERROR_CHECK(LvmStatus, "LVREV_SetControlParameters", "Reverb_configure") - //ALOGV("\tReverb_configure Succesfully called LVREV_SetControlParameters\n"); + LVM_ERROR_CHECK(LvmStatus, "LVREV_SetControlParameters", 
"Reverb_setConfig") + //ALOGV("\tReverb_setConfig Succesfully called LVREV_SetControlParameters\n"); }else{ - //ALOGV("\tReverb_configure keep sampling rate at %d", SampleRate); + //ALOGV("\tReverb_setConfig keep sampling rate at %d", SampleRate); } - //ALOGV("\tReverb_configure End"); + //ALOGV("\tReverb_setConfig End"); return 0; -} /* end Reverb_configure */ +} /* end Reverb_setConfig */ +//---------------------------------------------------------------------------- +// Reverb_getConfig() +//---------------------------------------------------------------------------- +// Purpose: Get input and output audio configuration. +// +// Inputs: +// pContext: effect engine context +// pConfig: pointer to effect_config_t structure holding input and output +// configuration parameters +// +// Outputs: +// +//---------------------------------------------------------------------------- + +void Reverb_getConfig(ReverbContext *pContext, effect_config_t *pConfig) +{ + memcpy(pConfig, &pContext->config, sizeof(effect_config_t)); +} /* end Reverb_getConfig */ //---------------------------------------------------------------------------- // Reverb_init() @@ -1924,24 +1943,36 @@ int Reverb_command(effect_handle_t self, *(int *) pReplyData = 0; break; - case EFFECT_CMD_CONFIGURE: + case EFFECT_CMD_SET_CONFIG: //ALOGV("\tReverb_command cmdCode Case: " - // "EFFECT_CMD_CONFIGURE start"); - if (pCmdData == NULL|| - cmdSize != sizeof(effect_config_t)|| - pReplyData == NULL|| - *replySize != sizeof(int)){ + // "EFFECT_CMD_SET_CONFIG start"); + if (pCmdData == NULL || + cmdSize != sizeof(effect_config_t) || + pReplyData == NULL || + *replySize != sizeof(int)) { + ALOGV("\tLVM_ERROR : Reverb_command cmdCode Case: " + "EFFECT_CMD_SET_CONFIG: ERROR"); + return -EINVAL; + } + *(int *) pReplyData = android::Reverb_setConfig(pContext, + (effect_config_t *) pCmdData); + break; + + case EFFECT_CMD_GET_CONFIG: + if (pReplyData == NULL || + *replySize != sizeof(effect_config_t)) { 
ALOGV("\tLVM_ERROR : Reverb_command cmdCode Case: " - "EFFECT_CMD_CONFIGURE: ERROR"); + "EFFECT_CMD_GET_CONFIG: ERROR"); return -EINVAL; } - *(int *) pReplyData = Reverb_configure(pContext, (effect_config_t *) pCmdData); + + android::Reverb_getConfig(pContext, (effect_config_t *)pReplyData); break; case EFFECT_CMD_RESET: //ALOGV("\tReverb_command cmdCode Case: " // "EFFECT_CMD_RESET start"); - Reverb_configure(pContext, &pContext->config); + Reverb_setConfig(pContext, &pContext->config); break; case EFFECT_CMD_GET_PARAM:{ diff --git a/media/libeffects/preprocessing/Android.mk b/media/libeffects/preprocessing/Android.mk index 77d40b6..7f7c7e1 100755 --- a/media/libeffects/preprocessing/Android.mk +++ b/media/libeffects/preprocessing/Android.mk @@ -13,7 +13,7 @@ LOCAL_SRC_FILES:= \ LOCAL_C_INCLUDES += \ external/webrtc/src \ external/webrtc/src/modules/interface \ - external/webrtc/src/modules/audio_processing/main/interface \ + external/webrtc/src/modules/audio_processing/interface \ system/media/audio_effects/include LOCAL_C_INCLUDES += $(call include-path-for, speex) diff --git a/media/libeffects/preprocessing/PreProcessing.cpp b/media/libeffects/preprocessing/PreProcessing.cpp index 6267d1d..dc27d38 100755 --- a/media/libeffects/preprocessing/PreProcessing.cpp +++ b/media/libeffects/preprocessing/PreProcessing.cpp @@ -24,8 +24,8 @@ #include <audio_effects/effect_aec.h> #include <audio_effects/effect_agc.h> #include <audio_effects/effect_ns.h> -#include "modules/interface/module_common_types.h" -#include "modules/audio_processing/main/interface/audio_processing.h" +#include <module_common_types.h> +#include <audio_processing.h> #include "speex/speex_resampler.h" @@ -220,8 +220,8 @@ bool HasReverseStream(uint32_t procId) // Automatic Gain Control (AGC) //------------------------------------------------------------------------------ -static const int kAgcDefaultTargetLevel = 0; -static const int kAgcDefaultCompGain = 90; +static const int kAgcDefaultTargetLevel = 3; 
+static const int kAgcDefaultCompGain = 9; static const bool kAgcDefaultLimiter = true; int AgcInit (preproc_effect_t *effect) @@ -845,6 +845,17 @@ int Session_SetConfig(preproc_session_t *session, effect_config_t *config) config->inputCfg.samplingRate, config->inputCfg.channels); int status; + // if at least one process is enabled, do not accept configuration changes + if (session->enabledMsk) { + if (session->samplingRate != config->inputCfg.samplingRate || + session->inChannelCount != inCnl || + session->outChannelCount != outCnl) { + return -ENOSYS; + } else { + return 0; + } + } + // AEC implementation is limited to 16kHz if (config->inputCfg.samplingRate >= 32000 && !(session->createdMsk & (1 << PREPROC_AEC))) { session->apmSamplingRate = 32000; @@ -940,6 +951,19 @@ int Session_SetConfig(preproc_session_t *session, effect_config_t *config) return 0; } +void Session_GetConfig(preproc_session_t *session, effect_config_t *config) +{ + memset(config, 0, sizeof(effect_config_t)); + config->inputCfg.samplingRate = config->outputCfg.samplingRate = session->samplingRate; + config->inputCfg.format = config->outputCfg.format = AUDIO_FORMAT_PCM_16_BIT; + config->inputCfg.channels = session->inChannelCount == 1 ? + AUDIO_CHANNEL_IN_MONO : AUDIO_CHANNEL_IN_STEREO; + config->outputCfg.channels = session->outChannelCount == 1 ? 
+ AUDIO_CHANNEL_IN_MONO : AUDIO_CHANNEL_IN_STEREO; + config->inputCfg.mask = config->outputCfg.mask = + (EFFECT_CONFIG_SMP_RATE | EFFECT_CONFIG_CHANNELS | EFFECT_CONFIG_FORMAT); +} + int Session_SetReverseConfig(preproc_session_t *session, effect_config_t *config) { if (config->inputCfg.samplingRate != config->outputCfg.samplingRate || @@ -969,6 +993,17 @@ int Session_SetReverseConfig(preproc_session_t *session, effect_config_t *config return 0; } +void Session_GetReverseConfig(preproc_session_t *session, effect_config_t *config) +{ + memset(config, 0, sizeof(effect_config_t)); + config->inputCfg.samplingRate = config->outputCfg.samplingRate = session->samplingRate; + config->inputCfg.format = config->outputCfg.format = AUDIO_FORMAT_PCM_16_BIT; + config->inputCfg.channels = config->outputCfg.channels = + session->revChannelCount == 1 ? AUDIO_CHANNEL_IN_MONO : AUDIO_CHANNEL_IN_STEREO; + config->inputCfg.mask = config->outputCfg.mask = + (EFFECT_CONFIG_SMP_RATE | EFFECT_CONFIG_CHANNELS | EFFECT_CONFIG_FORMAT); +} + void Session_SetProcEnabled(preproc_session_t *session, uint32_t procId, bool enabled) { if (enabled) { @@ -1048,7 +1083,7 @@ int PreProc_Init() { return sInitStatus; } -const effect_descriptor_t *PreProc_GetDescriptor(effect_uuid_t *uuid) +const effect_descriptor_t *PreProc_GetDescriptor(const effect_uuid_t *uuid) { size_t i; for (i = 0; i < PREPROC_NUM_EFFECTS; i++) { @@ -1250,29 +1285,42 @@ int PreProcessingFx_Command(effect_handle_t self, *(int *)pReplyData = 0; break; - case EFFECT_CMD_CONFIGURE: + case EFFECT_CMD_SET_CONFIG: if (pCmdData == NULL|| cmdSize != sizeof(effect_config_t)|| pReplyData == NULL|| *replySize != sizeof(int)){ ALOGV("PreProcessingFx_Command cmdCode Case: " - "EFFECT_CMD_CONFIGURE: ERROR"); + "EFFECT_CMD_SET_CONFIG: ERROR"); return -EINVAL; } *(int *)pReplyData = Session_SetConfig(effect->session, (effect_config_t *)pCmdData); if (*(int *)pReplyData != 0) { break; } - *(int *)pReplyData = Effect_SetState(effect, 
PREPROC_EFFECT_STATE_CONFIG); + if (effect->state != PREPROC_EFFECT_STATE_ACTIVE) { + *(int *)pReplyData = Effect_SetState(effect, PREPROC_EFFECT_STATE_CONFIG); + } break; - case EFFECT_CMD_CONFIGURE_REVERSE: - if (pCmdData == NULL|| - cmdSize != sizeof(effect_config_t)|| - pReplyData == NULL|| - *replySize != sizeof(int)){ + case EFFECT_CMD_GET_CONFIG: + if (pReplyData == NULL || + *replySize != sizeof(effect_config_t)) { + ALOGV("\tLVM_ERROR : PreProcessingFx_Command cmdCode Case: " + "EFFECT_CMD_GET_CONFIG: ERROR"); + return -EINVAL; + } + + Session_GetConfig(effect->session, (effect_config_t *)pReplyData); + break; + + case EFFECT_CMD_SET_CONFIG_REVERSE: + if (pCmdData == NULL || + cmdSize != sizeof(effect_config_t) || + pReplyData == NULL || + *replySize != sizeof(int)) { ALOGV("PreProcessingFx_Command cmdCode Case: " - "EFFECT_CMD_CONFIGURE_REVERSE: ERROR"); + "EFFECT_CMD_SET_CONFIG_REVERSE: ERROR"); return -EINVAL; } *(int *)pReplyData = Session_SetReverseConfig(effect->session, @@ -1282,6 +1330,16 @@ int PreProcessingFx_Command(effect_handle_t self, } break; + case EFFECT_CMD_GET_CONFIG_REVERSE: + if (pReplyData == NULL || + *replySize != sizeof(effect_config_t)){ + ALOGV("PreProcessingFx_Command cmdCode Case: " + "EFFECT_CMD_GET_CONFIG_REVERSE: ERROR"); + return -EINVAL; + } + Session_GetReverseConfig(effect->session, (effect_config_t *)pReplyData); + break; + case EFFECT_CMD_RESET: if (effect->ops->reset) { effect->ops->reset(effect); @@ -1523,7 +1581,7 @@ int PreProcessingFx_Command(effect_handle_t self, return 0; } -int PreProcessingLib_Create(effect_uuid_t *uuid, +int PreProcessingLib_Create(const effect_uuid_t *uuid, int32_t sessionId, int32_t ioId, effect_handle_t *pInterface) @@ -1575,7 +1633,7 @@ int PreProcessingLib_Release(effect_handle_t interface) return Session_ReleaseEffect(fx->session, fx); } -int PreProcessingLib_GetDescriptor(effect_uuid_t *uuid, +int PreProcessingLib_GetDescriptor(const effect_uuid_t *uuid, 
effect_descriptor_t *pDescriptor) { if (pDescriptor == NULL || uuid == NULL){ diff --git a/media/libeffects/testlibs/AudioBiquadFilter.cpp b/media/libeffects/testlibs/AudioBiquadFilter.cpp index 72917a3..16dd1c5 100644 --- a/media/libeffects/testlibs/AudioBiquadFilter.cpp +++ b/media/libeffects/testlibs/AudioBiquadFilter.cpp @@ -17,12 +17,10 @@ #include <string.h> #include <assert.h> +#include <cutils/compiler.h> #include "AudioBiquadFilter.h" -#define LIKELY( exp ) (__builtin_expect( (exp) != 0, true )) -#define UNLIKELY( exp ) (__builtin_expect( (exp) != 0, false )) - namespace android { const audio_coef_t AudioBiquadFilter::IDENTITY_COEFS[AudioBiquadFilter::NUM_COEFS] = { AUDIO_COEF_ONE, 0, 0, 0, 0 }; @@ -55,7 +53,7 @@ void AudioBiquadFilter::clear() { void AudioBiquadFilter::setCoefs(const audio_coef_t coefs[NUM_COEFS], bool immediate) { memcpy(mTargetCoefs, coefs, sizeof(mTargetCoefs)); if (mState & STATE_ENABLED_MASK) { - if (UNLIKELY(immediate)) { + if (CC_UNLIKELY(immediate)) { memcpy(mCoefs, coefs, sizeof(mCoefs)); setState(STATE_NORMAL); } else { @@ -70,7 +68,7 @@ void AudioBiquadFilter::process(const audio_sample_t in[], audio_sample_t out[], } void AudioBiquadFilter::enable(bool immediate) { - if (UNLIKELY(immediate)) { + if (CC_UNLIKELY(immediate)) { memcpy(mCoefs, mTargetCoefs, sizeof(mCoefs)); setState(STATE_NORMAL); } else { @@ -79,7 +77,7 @@ void AudioBiquadFilter::enable(bool immediate) { } void AudioBiquadFilter::disable(bool immediate) { - if (UNLIKELY(immediate)) { + if (CC_UNLIKELY(immediate)) { memcpy(mCoefs, IDENTITY_COEFS, sizeof(mCoefs)); setState(STATE_BYPASS); } else { @@ -142,7 +140,7 @@ void AudioBiquadFilter::process_bypass(const audio_sample_t * in, audio_sample_t * out, int frameCount) { // The common case is in-place processing, because this is what the EQ does. 
- if (UNLIKELY(in != out)) { + if (CC_UNLIKELY(in != out)) { memcpy(out, in, frameCount * mNumChannels * sizeof(audio_sample_t)); } } diff --git a/media/libeffects/testlibs/AudioCoefInterpolator.cpp b/media/libeffects/testlibs/AudioCoefInterpolator.cpp index 039ab9f..6b56922 100644 --- a/media/libeffects/testlibs/AudioCoefInterpolator.cpp +++ b/media/libeffects/testlibs/AudioCoefInterpolator.cpp @@ -16,10 +16,10 @@ */ #include <string.h> -#include "AudioCoefInterpolator.h" -#define LIKELY( exp ) (__builtin_expect( (exp) != 0, true )) -#define UNLIKELY( exp ) (__builtin_expect( (exp) != 0, false )) +#include <cutils/compiler.h> + +#include "AudioCoefInterpolator.h" namespace android { @@ -44,9 +44,9 @@ void AudioCoefInterpolator::getCoef(const int intCoord[], uint32_t fracCoord[], size_t index = 0; size_t dim = mNumInDims; while (dim-- > 0) { - if (UNLIKELY(intCoord[dim] < 0)) { + if (CC_UNLIKELY(intCoord[dim] < 0)) { fracCoord[dim] = 0; - } else if (UNLIKELY(intCoord[dim] >= (int)mInDims[dim] - 1)) { + } else if (CC_UNLIKELY(intCoord[dim] >= (int)mInDims[dim] - 1)) { fracCoord[dim] = 0; index += mInDimOffsets[dim] * (mInDims[dim] - 1); } else { @@ -63,7 +63,7 @@ void AudioCoefInterpolator::getCoefRecurse(size_t index, memcpy(out, mTable + index, mNumOutDims * sizeof(audio_coef_t)); } else { getCoefRecurse(index, fracCoord, out, dim + 1); - if (LIKELY(fracCoord != 0)) { + if (CC_LIKELY(fracCoord != 0)) { audio_coef_t tempCoef[MAX_OUT_DIMS]; getCoefRecurse(index + mInDimOffsets[dim], fracCoord, tempCoef, dim + 1); diff --git a/media/libeffects/testlibs/AudioCommon.h b/media/libeffects/testlibs/AudioCommon.h index 444f93a..e8080dc 100644 --- a/media/libeffects/testlibs/AudioCommon.h +++ b/media/libeffects/testlibs/AudioCommon.h @@ -20,6 +20,7 @@ #include <stdint.h> #include <stddef.h> +#include <cutils/compiler.h> namespace android { @@ -76,9 +77,9 @@ inline int16_t audio_sample_t_to_s15(audio_sample_t sample) { // Convert a audio_sample_t sample to S15 (with 
clipping) inline int16_t audio_sample_t_to_s15_clip(audio_sample_t sample) { // TODO: optimize for targets supporting this as an atomic operation. - if (__builtin_expect(sample >= (0x7FFF << 9), 0)) { + if (CC_UNLIKELY(sample >= (0x7FFF << 9))) { return 0x7FFF; - } else if (__builtin_expect(sample <= -(0x8000 << 9), 0)) { + } else if (CC_UNLIKELY(sample <= -(0x8000 << 9))) { return 0x8000; } else { return audio_sample_t_to_s15(sample); diff --git a/media/libeffects/testlibs/AudioPeakingFilter.cpp b/media/libeffects/testlibs/AudioPeakingFilter.cpp index 60fefe6..99323ac 100644 --- a/media/libeffects/testlibs/AudioPeakingFilter.cpp +++ b/media/libeffects/testlibs/AudioPeakingFilter.cpp @@ -21,9 +21,7 @@ #include <new> #include <assert.h> - -#define LIKELY( exp ) (__builtin_expect( (exp) != 0, true )) -#define UNLIKELY( exp ) (__builtin_expect( (exp) != 0, false )) +#include <cutils/compiler.h> namespace android { // Format of the coefficient table: @@ -66,12 +64,12 @@ void AudioPeakingFilter::reset() { void AudioPeakingFilter::setFrequency(uint32_t millihertz) { mNominalFrequency = millihertz; - if (UNLIKELY(millihertz > mNiquistFreq / 2)) { + if (CC_UNLIKELY(millihertz > mNiquistFreq / 2)) { millihertz = mNiquistFreq / 2; } uint32_t normFreq = static_cast<uint32_t>( (static_cast<uint64_t>(millihertz) * mFrequencyFactor) >> 10); - if (LIKELY(normFreq > (1 << 23))) { + if (CC_LIKELY(normFreq > (1 << 23))) { mFrequency = (Effects_log2(normFreq) - ((32-9) << 15)) << (FREQ_PRECISION_BITS - 15); } else { mFrequency = 0; @@ -107,11 +105,11 @@ void AudioPeakingFilter::getBandRange(uint32_t & low, uint32_t & high) const { int32_t halfBW = (((mBandwidth + 1) / 2) << 15) / 1200; low = static_cast<uint32_t>((static_cast<uint64_t>(mNominalFrequency) * Effects_exp2(-halfBW + (16 << 15))) >> 16); - if (UNLIKELY(halfBW >= (16 << 15))) { + if (CC_UNLIKELY(halfBW >= (16 << 15))) { high = mNiquistFreq; } else { high = static_cast<uint32_t>((static_cast<uint64_t>(mNominalFrequency) * 
Effects_exp2(halfBW + (16 << 15))) >> 16); - if (UNLIKELY(high > mNiquistFreq)) { + if (CC_UNLIKELY(high > mNiquistFreq)) { high = mNiquistFreq; } } diff --git a/media/libeffects/testlibs/AudioShelvingFilter.cpp b/media/libeffects/testlibs/AudioShelvingFilter.cpp index b8650ba..e031287 100644 --- a/media/libeffects/testlibs/AudioShelvingFilter.cpp +++ b/media/libeffects/testlibs/AudioShelvingFilter.cpp @@ -21,9 +21,7 @@ #include <new> #include <assert.h> - -#define LIKELY( exp ) (__builtin_expect( (exp) != 0, true )) -#define UNLIKELY( exp ) (__builtin_expect( (exp) != 0, false )) +#include <cutils/compiler.h> namespace android { // Format of the coefficient tables: @@ -71,13 +69,13 @@ void AudioShelvingFilter::reset() { void AudioShelvingFilter::setFrequency(uint32_t millihertz) { mNominalFrequency = millihertz; - if (UNLIKELY(millihertz > mNiquistFreq / 2)) { + if (CC_UNLIKELY(millihertz > mNiquistFreq / 2)) { millihertz = mNiquistFreq / 2; } uint32_t normFreq = static_cast<uint32_t>( (static_cast<uint64_t>(millihertz) * mFrequencyFactor) >> 10); uint32_t log2minFreq = (mType == kLowShelf ? 
(32-10) : (32-2)); - if (LIKELY(normFreq > (1U << log2minFreq))) { + if (CC_LIKELY(normFreq > (1U << log2minFreq))) { mFrequency = (Effects_log2(normFreq) - (log2minFreq << 15)) << (FREQ_PRECISION_BITS - 15); } else { mFrequency = 0; diff --git a/media/libeffects/testlibs/EffectEqualizer.cpp b/media/libeffects/testlibs/EffectEqualizer.cpp index 43f34de..35a4a61 100644 --- a/media/libeffects/testlibs/EffectEqualizer.cpp +++ b/media/libeffects/testlibs/EffectEqualizer.cpp @@ -114,7 +114,7 @@ struct EqualizerContext { //--- local function prototypes int Equalizer_init(EqualizerContext *pContext); -int Equalizer_configure(EqualizerContext *pContext, effect_config_t *pConfig); +int Equalizer_setConfig(EqualizerContext *pContext, effect_config_t *pConfig); int Equalizer_getParameter(AudioEqualizer * pEqualizer, int32_t *pParam, size_t *pValueSize, void *pValue); int Equalizer_setParameter(AudioEqualizer * pEqualizer, int32_t *pParam, void *pValue); @@ -140,7 +140,7 @@ extern "C" int EffectQueryEffect(uint32_t index, return 0; } /* end EffectQueryNext */ -extern "C" int EffectCreate(effect_uuid_t *uuid, +extern "C" int EffectCreate(const effect_uuid_t *uuid, int32_t sessionId, int32_t ioId, effect_handle_t *pHandle) { @@ -195,7 +195,7 @@ extern "C" int EffectRelease(effect_handle_t handle) { return 0; } /* end EffectRelease */ -extern "C" int EffectGetDescriptor(effect_uuid_t *uuid, +extern "C" int EffectGetDescriptor(const effect_uuid_t *uuid, effect_descriptor_t *pDescriptor) { if (pDescriptor == NULL || uuid == NULL){ @@ -224,7 +224,7 @@ extern "C" int EffectGetDescriptor(effect_uuid_t *uuid, } //---------------------------------------------------------------------------- -// Equalizer_configure() +// Equalizer_setConfig() //---------------------------------------------------------------------------- // Purpose: Set input and output audio configuration. 
// @@ -237,9 +237,9 @@ extern "C" int EffectGetDescriptor(effect_uuid_t *uuid, // //---------------------------------------------------------------------------- -int Equalizer_configure(EqualizerContext *pContext, effect_config_t *pConfig) +int Equalizer_setConfig(EqualizerContext *pContext, effect_config_t *pConfig) { - ALOGV("Equalizer_configure start"); + ALOGV("Equalizer_setConfig start"); CHECK_ARG(pContext != NULL); CHECK_ARG(pConfig != NULL); @@ -272,7 +272,26 @@ int Equalizer_configure(EqualizerContext *pContext, effect_config_t *pConfig) pConfig->outputCfg.accessMode); return 0; -} // end Equalizer_configure +} // end Equalizer_setConfig + +//---------------------------------------------------------------------------- +// Equalizer_getConfig() +//---------------------------------------------------------------------------- +// Purpose: Get input and output audio configuration. +// +// Inputs: +// pContext: effect engine context +// pConfig: pointer to effect_config_t structure holding input and output +// configuration parameters +// +// Outputs: +// +//---------------------------------------------------------------------------- + +void Equalizer_getConfig(EqualizerContext *pContext, effect_config_t *pConfig) +{ + memcpy(pConfig, &pContext->config, sizeof(effect_config_t)); +} // end Equalizer_getConfig //---------------------------------------------------------------------------- @@ -332,7 +351,7 @@ int Equalizer_init(EqualizerContext *pContext) pContext->pEqualizer->enable(true); - Equalizer_configure(pContext, &pContext->config); + Equalizer_setConfig(pContext, &pContext->config); return 0; } // end Equalizer_init @@ -643,16 +662,22 @@ extern "C" int Equalizer_command(effect_handle_t self, uint32_t cmdCode, uint32_ } *(int *) pReplyData = Equalizer_init(pContext); break; - case EFFECT_CMD_CONFIGURE: + case EFFECT_CMD_SET_CONFIG: if (pCmdData == NULL || cmdSize != sizeof(effect_config_t) || pReplyData == NULL || *replySize != sizeof(int)) { return 
-EINVAL; } - *(int *) pReplyData = Equalizer_configure(pContext, + *(int *) pReplyData = Equalizer_setConfig(pContext, (effect_config_t *) pCmdData); break; + case EFFECT_CMD_GET_CONFIG: + if (pReplyData == NULL || *replySize != sizeof(effect_config_t)) { + return -EINVAL; + } + Equalizer_getConfig(pContext, (effect_config_t *) pCmdData); + break; case EFFECT_CMD_RESET: - Equalizer_configure(pContext, &pContext->config); + Equalizer_setConfig(pContext, &pContext->config); break; case EFFECT_CMD_GET_PARAM: { if (pCmdData == NULL || cmdSize < (int)(sizeof(effect_param_t) + sizeof(int32_t)) || diff --git a/media/libeffects/testlibs/EffectReverb.c b/media/libeffects/testlibs/EffectReverb.c index d22868a..8351712 100644 --- a/media/libeffects/testlibs/EffectReverb.c +++ b/media/libeffects/testlibs/EffectReverb.c @@ -111,7 +111,7 @@ int EffectQueryEffect(uint32_t index, effect_descriptor_t *pDescriptor) { return 0; } -int EffectCreate(effect_uuid_t *uuid, +int EffectCreate(const effect_uuid_t *uuid, int32_t sessionId, int32_t ioId, effect_handle_t *pHandle) { @@ -182,7 +182,7 @@ int EffectRelease(effect_handle_t handle) { return 0; } -int EffectGetDescriptor(effect_uuid_t *uuid, +int EffectGetDescriptor(const effect_uuid_t *uuid, effect_descriptor_t *pDescriptor) { int i; int length = sizeof(gDescriptors) / sizeof(const effect_descriptor_t *); @@ -318,14 +318,20 @@ static int Reverb_Command(effect_handle_t self, uint32_t cmdCode, uint32_t cmdSi pRvbModule->context.mState = REVERB_STATE_INITIALIZED; } break; - case EFFECT_CMD_CONFIGURE: + case EFFECT_CMD_SET_CONFIG: if (pCmdData == NULL || cmdSize != sizeof(effect_config_t) || pReplyData == NULL || *replySize != sizeof(int)) { return -EINVAL; } - *(int *) pReplyData = Reverb_Configure(pRvbModule, + *(int *) pReplyData = Reverb_setConfig(pRvbModule, (effect_config_t *)pCmdData, false); break; + case EFFECT_CMD_GET_CONFIG: + if (pReplyData == NULL || *replySize != sizeof(effect_config_t)) { + return -EINVAL; + } + 
Reverb_getConfig(pRvbModule, (effect_config_t *) pCmdData); + break; case EFFECT_CMD_RESET: Reverb_Reset(pReverb, false); break; @@ -492,7 +498,7 @@ int Reverb_Init(reverb_module_t *pRvbModule, int aux, int preset) { pRvbModule->config.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_ACCUMULATE; pRvbModule->config.outputCfg.mask = EFFECT_CONFIG_ALL; - ret = Reverb_Configure(pRvbModule, &pRvbModule->config, true); + ret = Reverb_setConfig(pRvbModule, &pRvbModule->config, true); if (ret < 0) { ALOGV("Reverb_Init error %d on module %p", ret, pRvbModule); } @@ -501,7 +507,7 @@ int Reverb_Init(reverb_module_t *pRvbModule, int aux, int preset) { } /*---------------------------------------------------------------------------- - * Reverb_Init() + * Reverb_setConfig() *---------------------------------------------------------------------------- * Purpose: * Set input and output audio configuration. @@ -518,7 +524,7 @@ int Reverb_Init(reverb_module_t *pRvbModule, int aux, int preset) { *---------------------------------------------------------------------------- */ -int Reverb_Configure(reverb_module_t *pRvbModule, effect_config_t *pConfig, +int Reverb_setConfig(reverb_module_t *pRvbModule, effect_config_t *pConfig, bool init) { reverb_object_t *pReverb = &pRvbModule->context; int bufferSizeInSamples; @@ -531,12 +537,12 @@ int Reverb_Configure(reverb_module_t *pRvbModule, effect_config_t *pConfig, || pConfig->outputCfg.channels != OUTPUT_CHANNELS || pConfig->inputCfg.format != AUDIO_FORMAT_PCM_16_BIT || pConfig->outputCfg.format != AUDIO_FORMAT_PCM_16_BIT) { - ALOGV("Reverb_Configure invalid config"); + ALOGV("Reverb_setConfig invalid config"); return -EINVAL; } if ((pReverb->m_Aux && (pConfig->inputCfg.channels != AUDIO_CHANNEL_OUT_MONO)) || (!pReverb->m_Aux && (pConfig->inputCfg.channels != AUDIO_CHANNEL_OUT_STEREO))) { - ALOGV("Reverb_Configure invalid config"); + ALOGV("Reverb_setConfig invalid config"); return -EINVAL; } @@ -576,7 +582,7 @@ int 
Reverb_Configure(reverb_module_t *pRvbModule, effect_config_t *pConfig, pReverb->m_nCosWT_5KHz = 25997; break; default: - ALOGV("Reverb_Configure invalid sampling rate %d", pReverb->m_nSamplingRate); + ALOGV("Reverb_setConfig invalid sampling rate %d", pReverb->m_nSamplingRate); return -EINVAL; } @@ -620,6 +626,28 @@ int Reverb_Configure(reverb_module_t *pRvbModule, effect_config_t *pConfig, } /*---------------------------------------------------------------------------- + * Reverb_getConfig() + *---------------------------------------------------------------------------- + * Purpose: + * Get input and output audio configuration. + * + * Inputs: + * pRvbModule - pointer to reverb effect module + * pConfig - pointer to effect_config_t structure containing input + * and output audio parameters configuration + * Outputs: + * + * Side Effects: + * + *---------------------------------------------------------------------------- + */ + +void Reverb_getConfig(reverb_module_t *pRvbModule, effect_config_t *pConfig) +{ + memcpy(pConfig, &pRvbModule->config, sizeof(effect_config_t)); +} + +/*---------------------------------------------------------------------------- * Reverb_Reset() *---------------------------------------------------------------------------- * Purpose: @@ -844,7 +872,7 @@ int Reverb_getParameter(reverb_object_t *pReverb, int32_t param, size_t *pSize, if (param == REVERB_PARAM_ROOM_HF_LEVEL) { break; } - pValue32 = &pProperties->decayTime; + pValue32 = (int32_t *)&pProperties->decayTime; /* FALL THROUGH */ case REVERB_PARAM_DECAY_TIME: @@ -916,7 +944,7 @@ int Reverb_getParameter(reverb_object_t *pReverb, int32_t param, size_t *pSize, if (param == REVERB_PARAM_REFLECTIONS_LEVEL) { break; } - pValue32 = &pProperties->reflectionsDelay; + pValue32 = (int32_t *)&pProperties->reflectionsDelay; /* FALL THROUGH */ case REVERB_PARAM_REFLECTIONS_DELAY: @@ -940,7 +968,7 @@ int Reverb_getParameter(reverb_object_t *pReverb, int32_t param, size_t *pSize, if (param == 
REVERB_PARAM_REVERB_LEVEL) { break; } - pValue32 = &pProperties->reverbDelay; + pValue32 = (int32_t *)&pProperties->reverbDelay; /* FALL THROUGH */ case REVERB_PARAM_REVERB_DELAY: diff --git a/media/libeffects/testlibs/EffectReverb.h b/media/libeffects/testlibs/EffectReverb.h index 8e2cc31..1fb14a7 100644 --- a/media/libeffects/testlibs/EffectReverb.h +++ b/media/libeffects/testlibs/EffectReverb.h @@ -303,12 +303,12 @@ typedef struct reverb_module_s { int EffectQueryNumberEffects(uint32_t *pNumEffects); int EffectQueryEffect(uint32_t index, effect_descriptor_t *pDescriptor); -int EffectCreate(effect_uuid_t *effectUID, +int EffectCreate(const effect_uuid_t *effectUID, int32_t sessionId, int32_t ioId, effect_handle_t *pHandle); int EffectRelease(effect_handle_t handle); -int EffectGetDescriptor(effect_uuid_t *uuid, +int EffectGetDescriptor(const effect_uuid_t *uuid, effect_descriptor_t *pDescriptor); static int Reverb_Process(effect_handle_t self, @@ -329,7 +329,8 @@ static int Reverb_GetDescriptor(effect_handle_t self, */ int Reverb_Init(reverb_module_t *pRvbModule, int aux, int preset); -int Reverb_Configure(reverb_module_t *pRvbModule, effect_config_t *pConfig, bool init); +int Reverb_setConfig(reverb_module_t *pRvbModule, effect_config_t *pConfig, bool init); +void Reverb_getConfig(reverb_module_t *pRvbModule, effect_config_t *pConfig); void Reverb_Reset(reverb_object_t *pReverb, bool init); int Reverb_setParameter (reverb_object_t *pReverb, int32_t param, size_t size, void *pValue); diff --git a/media/libeffects/visualizer/EffectVisualizer.cpp b/media/libeffects/visualizer/EffectVisualizer.cpp index c441710..51c8b68 100644 --- a/media/libeffects/visualizer/EffectVisualizer.cpp +++ b/media/libeffects/visualizer/EffectVisualizer.cpp @@ -78,7 +78,7 @@ void Visualizer_reset(VisualizerContext *pContext) } //---------------------------------------------------------------------------- -// Visualizer_configure() +// Visualizer_setConfig() 
//---------------------------------------------------------------------------- // Purpose: Set input and output audio configuration. // @@ -91,9 +91,9 @@ void Visualizer_reset(VisualizerContext *pContext) // //---------------------------------------------------------------------------- -int Visualizer_configure(VisualizerContext *pContext, effect_config_t *pConfig) +int Visualizer_setConfig(VisualizerContext *pContext, effect_config_t *pConfig) { - ALOGV("Visualizer_configure start"); + ALOGV("Visualizer_setConfig start"); if (pConfig->inputCfg.samplingRate != pConfig->outputCfg.samplingRate) return -EINVAL; if (pConfig->inputCfg.channels != pConfig->outputCfg.channels) return -EINVAL; @@ -112,6 +112,26 @@ int Visualizer_configure(VisualizerContext *pContext, effect_config_t *pConfig) //---------------------------------------------------------------------------- +// Visualizer_getConfig() +//---------------------------------------------------------------------------- +// Purpose: Get input and output audio configuration. +// +// Inputs: +// pContext: effect engine context +// pConfig: pointer to effect_config_t structure holding input and output +// configuration parameters +// +// Outputs: +// +//---------------------------------------------------------------------------- + +void Visualizer_getConfig(VisualizerContext *pContext, effect_config_t *pConfig) +{ + memcpy(pConfig, &pContext->mConfig, sizeof(effect_config_t)); +} + + +//---------------------------------------------------------------------------- // Visualizer_init() //---------------------------------------------------------------------------- // Purpose: Initialize engine with default configuration. 
@@ -144,7 +164,7 @@ int Visualizer_init(VisualizerContext *pContext) pContext->mCaptureSize = VISUALIZER_CAPTURE_SIZE_MAX; - Visualizer_configure(pContext, &pContext->mConfig); + Visualizer_setConfig(pContext, &pContext->mConfig); return 0; } @@ -170,7 +190,7 @@ int VisualizerLib_QueryEffect(uint32_t index, return 0; } -int VisualizerLib_Create(effect_uuid_t *uuid, +int VisualizerLib_Create(const effect_uuid_t *uuid, int32_t sessionId, int32_t ioId, effect_handle_t *pHandle) { @@ -220,7 +240,7 @@ int VisualizerLib_Release(effect_handle_t handle) { return 0; } -int VisualizerLib_GetDescriptor(effect_uuid_t *uuid, +int VisualizerLib_GetDescriptor(const effect_uuid_t *uuid, effect_descriptor_t *pDescriptor) { if (pDescriptor == NULL || uuid == NULL){ @@ -337,14 +357,21 @@ int Visualizer_command(effect_handle_t self, uint32_t cmdCode, uint32_t cmdSize, } *(int *) pReplyData = Visualizer_init(pContext); break; - case EFFECT_CMD_CONFIGURE: + case EFFECT_CMD_SET_CONFIG: if (pCmdData == NULL || cmdSize != sizeof(effect_config_t) || pReplyData == NULL || *replySize != sizeof(int)) { return -EINVAL; } - *(int *) pReplyData = Visualizer_configure(pContext, + *(int *) pReplyData = Visualizer_setConfig(pContext, (effect_config_t *) pCmdData); break; + case EFFECT_CMD_GET_CONFIG: + if (pReplyData == NULL || + *replySize != sizeof(effect_config_t)) { + return -EINVAL; + } + Visualizer_getConfig(pContext, (effect_config_t *)pReplyData); + break; case EFFECT_CMD_RESET: Visualizer_reset(pContext); break; diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk index 7af4a87..23670df 100644 --- a/media/libmedia/Android.mk +++ b/media/libmedia/Android.mk @@ -43,13 +43,12 @@ LOCAL_SRC_FILES:= \ IEffectClient.cpp \ AudioEffect.cpp \ Visualizer.cpp \ - MemoryLeakTrackUtil.cpp \ - fixedfft.cpp.arm + MemoryLeakTrackUtil.cpp LOCAL_SHARED_LIBRARIES := \ libui libcutils libutils libbinder libsonivox libicuuc libexpat \ libcamera_client libstagefright_foundation \ - libgui libdl + 
libgui libdl libaudioutils LOCAL_WHOLE_STATIC_LIBRARY := libmedia_helper @@ -61,6 +60,7 @@ LOCAL_C_INCLUDES := \ $(TOP)/frameworks/base/include/media/stagefright/openmax \ external/icu4c/common \ external/expat/lib \ - system/media/audio_effects/include + system/media/audio_effects/include \ + system/media/audio_utils/include include $(BUILD_SHARED_LIBRARY) diff --git a/media/libmedia/AudioEffect.cpp b/media/libmedia/AudioEffect.cpp index 6639d06..6808aa2 100644 --- a/media/libmedia/AudioEffect.cpp +++ b/media/libmedia/AudioEffect.cpp @@ -159,7 +159,7 @@ status_t AudioEffect::set(const effect_uuid_t *type, mCblk->buffer = (uint8_t *)mCblk + bufOffset; iEffect->asBinder()->linkToDeath(mIEffectClient); - ALOGV("set() %p OK effect: %s id: %d status %d enabled %d, ", this, mDescriptor.name, mId, mStatus, mEnabled); + ALOGV("set() %p OK effect: %s id: %d status %d enabled %d", this, mDescriptor.name, mId, mStatus, mEnabled); return mStatus; } @@ -202,7 +202,7 @@ bool AudioEffect::getEnabled() const status_t AudioEffect::setEnabled(bool enabled) { if (mStatus != NO_ERROR) { - return INVALID_OPERATION; + return (mStatus == ALREADY_EXISTS) ? (status_t) INVALID_OPERATION : mStatus; } status_t status = NO_ERROR; @@ -231,7 +231,7 @@ status_t AudioEffect::command(uint32_t cmdCode, { if (mStatus != NO_ERROR && mStatus != ALREADY_EXISTS) { ALOGV("command() bad status %d", mStatus); - return INVALID_OPERATION; + return mStatus; } if (cmdCode == EFFECT_CMD_ENABLE || cmdCode == EFFECT_CMD_DISABLE) { @@ -263,7 +263,7 @@ status_t AudioEffect::command(uint32_t cmdCode, status_t AudioEffect::setParameter(effect_param_t *param) { if (mStatus != NO_ERROR) { - return INVALID_OPERATION; + return (mStatus == ALREADY_EXISTS) ? 
(status_t) INVALID_OPERATION : mStatus; } if (param == NULL || param->psize == 0 || param->vsize == 0) { @@ -281,7 +281,7 @@ status_t AudioEffect::setParameter(effect_param_t *param) status_t AudioEffect::setParameterDeferred(effect_param_t *param) { if (mStatus != NO_ERROR) { - return INVALID_OPERATION; + return (mStatus == ALREADY_EXISTS) ? (status_t) INVALID_OPERATION : mStatus; } if (param == NULL || param->psize == 0 || param->vsize == 0) { @@ -307,7 +307,7 @@ status_t AudioEffect::setParameterDeferred(effect_param_t *param) status_t AudioEffect::setParameterCommit() { if (mStatus != NO_ERROR) { - return INVALID_OPERATION; + return (mStatus == ALREADY_EXISTS) ? (status_t) INVALID_OPERATION : mStatus; } Mutex::Autolock _l(mCblk->lock); @@ -321,7 +321,7 @@ status_t AudioEffect::setParameterCommit() status_t AudioEffect::getParameter(effect_param_t *param) { if (mStatus != NO_ERROR && mStatus != ALREADY_EXISTS) { - return INVALID_OPERATION; + return mStatus; } if (param == NULL || param->psize == 0 || param->vsize == 0) { @@ -341,8 +341,8 @@ status_t AudioEffect::getParameter(effect_param_t *param) void AudioEffect::binderDied() { ALOGW("IEffect died"); - mStatus = NO_INIT; - if (mCbf) { + mStatus = DEAD_OBJECT; + if (mCbf != NULL) { status_t status = DEAD_OBJECT; mCbf(EVENT_ERROR, mUserData, &status); } @@ -363,7 +363,7 @@ void AudioEffect::controlStatusChanged(bool controlGranted) mStatus = ALREADY_EXISTS; } } - if (mCbf) { + if (mCbf != NULL) { mCbf(EVENT_CONTROL_STATUS_CHANGED, mUserData, &controlGranted); } } @@ -373,7 +373,7 @@ void AudioEffect::enableStatusChanged(bool enabled) ALOGV("enableStatusChanged %p enabled %d mCbf %p", this, enabled, mCbf); if (mStatus == ALREADY_EXISTS) { mEnabled = enabled; - if (mCbf) { + if (mCbf != NULL) { mCbf(EVENT_ENABLE_STATUS_CHANGED, mUserData, &enabled); } } @@ -389,7 +389,7 @@ void AudioEffect::commandExecuted(uint32_t cmdCode, return; } - if (mCbf && cmdCode == EFFECT_CMD_SET_PARAM) { + if (mCbf != NULL && cmdCode == 
EFFECT_CMD_SET_PARAM) { effect_param_t *cmd = (effect_param_t *)cmdData; cmd->status = *(int32_t *)replyData; mCbf(EVENT_PARAMETER_CHANGED, mUserData, cmd); @@ -412,7 +412,8 @@ status_t AudioEffect::queryEffect(uint32_t index, effect_descriptor_t *descripto return af->queryEffect(index, descriptor); } -status_t AudioEffect::getEffectDescriptor(effect_uuid_t *uuid, effect_descriptor_t *descriptor) +status_t AudioEffect::getEffectDescriptor(const effect_uuid_t *uuid, + effect_descriptor_t *descriptor) /*const*/ { const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger(); if (af == 0) return PERMISSION_DENIED; diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp index 34a5eb7..943f3af 100644 --- a/media/libmedia/AudioRecord.cpp +++ b/media/libmedia/AudioRecord.cpp @@ -39,9 +39,7 @@ #include <system/audio.h> #include <cutils/bitops.h> - -#define LIKELY( exp ) (__builtin_expect( (exp) != 0, true )) -#define UNLIKELY( exp ) (__builtin_expect( (exp) != 0, false )) +#include <cutils/compiler.h> namespace android { // --------------------------------------------------------------------------- @@ -50,7 +48,7 @@ namespace android { status_t AudioRecord::getMinFrameCount( int* frameCount, uint32_t sampleRate, - int format, + audio_format_t format, int channelCount) { size_t size = 0; @@ -80,14 +78,15 @@ status_t AudioRecord::getMinFrameCount( // --------------------------------------------------------------------------- AudioRecord::AudioRecord() - : mStatus(NO_INIT), mSessionId(0) + : mStatus(NO_INIT), mSessionId(0), + mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(ANDROID_TGROUP_DEFAULT) { } AudioRecord::AudioRecord( - int inputSource, + audio_source_t inputSource, uint32_t sampleRate, - int format, + audio_format_t format, uint32_t channelMask, int frameCount, uint32_t flags, @@ -95,7 +94,8 @@ AudioRecord::AudioRecord( void* user, int notificationFrames, int sessionId) - : mStatus(NO_INIT), mSessionId(0) + : 
mStatus(NO_INIT), mSessionId(0), + mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(ANDROID_TGROUP_DEFAULT) { mStatus = set(inputSource, sampleRate, format, channelMask, frameCount, flags, cbf, user, notificationFrames, sessionId); @@ -119,9 +119,9 @@ AudioRecord::~AudioRecord() } status_t AudioRecord::set( - int inputSource, + audio_source_t inputSource, uint32_t sampleRate, - int format, + audio_format_t format, uint32_t channelMask, int frameCount, uint32_t flags, @@ -148,7 +148,7 @@ status_t AudioRecord::set( sampleRate = DEFAULT_SAMPLE_RATE; } // these below should probably come from the audioFlinger too... - if (format == 0) { + if (format == AUDIO_FORMAT_DEFAULT) { format = AUDIO_FORMAT_PCM_16_BIT; } // validate parameters @@ -206,11 +206,8 @@ status_t AudioRecord::set( return status; } - if (cbf != 0) { + if (cbf != NULL) { mClientRecordThread = new ClientRecordThread(*this, threadCanCallJava); - if (mClientRecordThread == 0) { - return NO_INIT; - } } mStatus = NO_ERROR; @@ -231,7 +228,7 @@ status_t AudioRecord::set( mMarkerReached = false; mNewPosition = 0; mUpdatePeriod = 0; - mInputSource = (uint8_t)inputSource; + mInputSource = inputSource; mFlags = flags; mInput = input; AudioSystem::acquireAudioSessionId(mSessionId); @@ -251,7 +248,7 @@ uint32_t AudioRecord::latency() const return mLatency; } -int AudioRecord::format() const +audio_format_t AudioRecord::format() const { return mFormat; } @@ -266,7 +263,7 @@ uint32_t AudioRecord::frameCount() const return mFrameCount; } -int AudioRecord::frameSize() const +size_t AudioRecord::frameSize() const { if (audio_is_linear_pcm(mFormat)) { return channelCount()*audio_bytes_per_sample(mFormat); @@ -275,9 +272,9 @@ int AudioRecord::frameSize() const } } -int AudioRecord::inputSource() const +audio_source_t AudioRecord::inputSource() const { - return (int)mInputSource; + return mInputSource; } // ------------------------------------------------------------------------- @@ -296,7 +293,6 @@ 
status_t AudioRecord::start() return WOULD_BLOCK; } } - t->mLock.lock(); } AutoMutex lock(mLock); @@ -308,10 +304,25 @@ status_t AudioRecord::start() if (mActive == 0) { mActive = 1; + pid_t tid; + if (t != 0) { + mReadyToRun = WOULD_BLOCK; + t->run("AudioRecord", ANDROID_PRIORITY_AUDIO); + tid = t->getTid(); // pid_t is unknown until run() + ALOGV("getTid=%d", tid); + if (tid == -1) { + tid = 0; + } + // thread blocks in readyToRun() + } else { + tid = 0; // not gettid() + } + cblk->lock.lock(); if (!(cblk->flags & CBLK_INVALID_MSK)) { cblk->lock.unlock(); - ret = mAudioRecord->start(); + ALOGV("mAudioRecord->start(tid=%d)", tid); + ret = mAudioRecord->start(tid); cblk->lock.lock(); if (ret == DEAD_OBJECT) { android_atomic_or(CBLK_INVALID_ON, &cblk->flags); @@ -326,19 +337,22 @@ status_t AudioRecord::start() cblk->bufferTimeoutMs = MAX_RUN_TIMEOUT_MS; cblk->waitTimeMs = 0; if (t != 0) { - t->run("ClientRecordThread", ANDROID_PRIORITY_AUDIO); + // thread unblocks in readyToRun() and returns NO_ERROR + mReadyToRun = NO_ERROR; + mCondition.signal(); } else { - setpriority(PRIO_PROCESS, 0, ANDROID_PRIORITY_AUDIO); + mPreviousPriority = getpriority(PRIO_PROCESS, 0); + mPreviousSchedulingGroup = androidGetThreadSchedulingGroup(0); + androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO); } } else { mActive = 0; + // thread unblocks in readyToRun() and returns NO_INIT + mReadyToRun = NO_INIT; + mCondition.signal(); } } - if (t != 0) { - t->mLock.unlock(); - } - return ret; } @@ -348,10 +362,6 @@ status_t AudioRecord::stop() ALOGV("stop"); - if (t != 0) { - t->mLock.lock(); - } - AutoMutex lock(mLock); if (mActive == 1) { mActive = 0; @@ -363,14 +373,11 @@ status_t AudioRecord::stop() if (t != 0) { t->requestExit(); } else { - setpriority(PRIO_PROCESS, 0, ANDROID_PRIORITY_NORMAL); + setpriority(PRIO_PROCESS, 0, mPreviousPriority); + androidSetThreadSchedulingGroup(0, mPreviousSchedulingGroup); } } - if (t != 0) { - t->mLock.unlock(); - } - return NO_ERROR; } @@ -379,7 +386,7 
@@ bool AudioRecord::stopped() const return !mActive; } -uint32_t AudioRecord::getSampleRate() +uint32_t AudioRecord::getSampleRate() const { AutoMutex lock(mLock); return mCblk->sampleRate; @@ -387,7 +394,7 @@ uint32_t AudioRecord::getSampleRate() status_t AudioRecord::setMarkerPosition(uint32_t marker) { - if (mCbf == 0) return INVALID_OPERATION; + if (mCbf == NULL) return INVALID_OPERATION; mMarkerPosition = marker; mMarkerReached = false; @@ -395,9 +402,9 @@ status_t AudioRecord::setMarkerPosition(uint32_t marker) return NO_ERROR; } -status_t AudioRecord::getMarkerPosition(uint32_t *marker) +status_t AudioRecord::getMarkerPosition(uint32_t *marker) const { - if (marker == 0) return BAD_VALUE; + if (marker == NULL) return BAD_VALUE; *marker = mMarkerPosition; @@ -406,7 +413,7 @@ status_t AudioRecord::getMarkerPosition(uint32_t *marker) status_t AudioRecord::setPositionUpdatePeriod(uint32_t updatePeriod) { - if (mCbf == 0) return INVALID_OPERATION; + if (mCbf == NULL) return INVALID_OPERATION; uint32_t curPosition; getPosition(&curPosition); @@ -416,18 +423,18 @@ status_t AudioRecord::setPositionUpdatePeriod(uint32_t updatePeriod) return NO_ERROR; } -status_t AudioRecord::getPositionUpdatePeriod(uint32_t *updatePeriod) +status_t AudioRecord::getPositionUpdatePeriod(uint32_t *updatePeriod) const { - if (updatePeriod == 0) return BAD_VALUE; + if (updatePeriod == NULL) return BAD_VALUE; *updatePeriod = mUpdatePeriod; return NO_ERROR; } -status_t AudioRecord::getPosition(uint32_t *position) +status_t AudioRecord::getPosition(uint32_t *position) const { - if (position == 0) return BAD_VALUE; + if (position == NULL) return BAD_VALUE; AutoMutex lock(mLock); *position = mCblk->user; @@ -435,7 +442,7 @@ status_t AudioRecord::getPosition(uint32_t *position) return NO_ERROR; } -unsigned int AudioRecord::getInputFramesLost() +unsigned int AudioRecord::getInputFramesLost() const { if (mActive) return AudioSystem::getInputFramesLost(mInput); @@ -448,7 +455,7 @@ unsigned int 
AudioRecord::getInputFramesLost() // must be called with mLock held status_t AudioRecord::openRecord_l( uint32_t sampleRate, - uint32_t format, + audio_format_t format, uint32_t channelMask, int frameCount, uint32_t flags, @@ -508,11 +515,11 @@ status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, int32_t waitCount) goto start_loop_here; while (framesReady == 0) { active = mActive; - if (UNLIKELY(!active)) { + if (CC_UNLIKELY(!active)) { cblk->lock.unlock(); return NO_MORE_BUFFERS; } - if (UNLIKELY(!waitCount)) { + if (CC_UNLIKELY(!waitCount)) { cblk->lock.unlock(); return WOULD_BLOCK; } @@ -529,13 +536,13 @@ status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, int32_t waitCount) if (cblk->flags & CBLK_INVALID_MSK) { goto create_new_record; } - if (__builtin_expect(result!=NO_ERROR, false)) { + if (CC_UNLIKELY(result != NO_ERROR)) { cblk->waitTimeMs += waitTimeMs; if (cblk->waitTimeMs >= cblk->bufferTimeoutMs) { ALOGW( "obtainBuffer timed out (is the CPU pegged?) " "user=%08x, server=%08x", cblk->user, cblk->server); cblk->lock.unlock(); - result = mAudioRecord->start(); + result = mAudioRecord->start(0); // callback thread hasn't changed cblk->lock.lock(); if (result == DEAD_OBJECT) { android_atomic_or(CBLK_INVALID_ON, &cblk->flags); @@ -590,7 +597,7 @@ void AudioRecord::releaseBuffer(Buffer* audioBuffer) mCblk->stepUser(audioBuffer->frameCount); } -audio_io_handle_t AudioRecord::getInput() +audio_io_handle_t AudioRecord::getInput() const { AutoMutex lock(mLock); return mInput; @@ -608,7 +615,7 @@ audio_io_handle_t AudioRecord::getInput_l() return mInput; } -int AudioRecord::getSessionId() +int AudioRecord::getSessionId() const { return mSessionId; } @@ -773,7 +780,7 @@ status_t AudioRecord::restoreRecord_l(audio_track_cblk_t*& cblk) result = openRecord_l(cblk->sampleRate, mFormat, mChannelMask, mFrameCount, mFlags, getInput_l()); if (result == NO_ERROR) { - result = mAudioRecord->start(); + result = mAudioRecord->start(0); // callback thread hasn't changed } 
if (result != NO_ERROR) { mActive = false; @@ -824,6 +831,15 @@ bool AudioRecord::ClientRecordThread::threadLoop() return mReceiver.processAudioBuffer(this); } +status_t AudioRecord::ClientRecordThread::readyToRun() +{ + AutoMutex(mReceiver.mLock); + while (mReceiver.mReadyToRun == WOULD_BLOCK) { + mReceiver.mCondition.wait(mReceiver.mLock); + } + return mReceiver.mReadyToRun; +} + // ------------------------------------------------------------------------- }; // namespace android diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp index f7f129c..a1cbf0f 100644 --- a/media/libmedia/AudioSystem.cpp +++ b/media/libmedia/AudioSystem.cpp @@ -35,12 +35,13 @@ sp<IAudioFlinger> AudioSystem::gAudioFlinger; sp<AudioSystem::AudioFlingerClient> AudioSystem::gAudioFlingerClient; audio_error_callback AudioSystem::gAudioErrorCallback = NULL; // Cached values -DefaultKeyedVector<int, audio_io_handle_t> AudioSystem::gStreamOutputMap(0); + +DefaultKeyedVector<audio_stream_type_t, audio_io_handle_t> AudioSystem::gStreamOutputMap(0); DefaultKeyedVector<audio_io_handle_t, AudioSystem::OutputDescriptor *> AudioSystem::gOutputs(0); -// Cached values for recording queries +// Cached values for recording queries, all protected by gLock uint32_t AudioSystem::gPrevInSamplingRate = 16000; -int AudioSystem::gPrevInFormat = AUDIO_FORMAT_PCM_16_BIT; +audio_format_t AudioSystem::gPrevInFormat = AUDIO_FORMAT_PCM_16_BIT; int AudioSystem::gPrevInChannelCount = 1; size_t AudioSystem::gInBuffSize = 0; @@ -49,7 +50,7 @@ size_t AudioSystem::gInBuffSize = 0; const sp<IAudioFlinger>& AudioSystem::get_audio_flinger() { Mutex::Autolock _l(gLock); - if (gAudioFlinger.get() == 0) { + if (gAudioFlinger == 0) { sp<IServiceManager> sm = defaultServiceManager(); sp<IBinder> binder; do { @@ -120,7 +121,8 @@ status_t AudioSystem::getMasterMute(bool* mute) return NO_ERROR; } -status_t AudioSystem::setStreamVolume(int stream, float value, int output) +status_t 
AudioSystem::setStreamVolume(audio_stream_type_t stream, float value, + audio_io_handle_t output) { if (uint32_t(stream) >= AUDIO_STREAM_CNT) return BAD_VALUE; const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger(); @@ -129,7 +131,7 @@ status_t AudioSystem::setStreamVolume(int stream, float value, int output) return NO_ERROR; } -status_t AudioSystem::setStreamMute(int stream, bool mute) +status_t AudioSystem::setStreamMute(audio_stream_type_t stream, bool mute) { if (uint32_t(stream) >= AUDIO_STREAM_CNT) return BAD_VALUE; const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger(); @@ -138,7 +140,8 @@ status_t AudioSystem::setStreamMute(int stream, bool mute) return NO_ERROR; } -status_t AudioSystem::getStreamVolume(int stream, float* volume, int output) +status_t AudioSystem::getStreamVolume(audio_stream_type_t stream, float* volume, + audio_io_handle_t output) { if (uint32_t(stream) >= AUDIO_STREAM_CNT) return BAD_VALUE; const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger(); @@ -147,7 +150,7 @@ status_t AudioSystem::getStreamVolume(int stream, float* volume, int output) return NO_ERROR; } -status_t AudioSystem::getStreamMute(int stream, bool* mute) +status_t AudioSystem::getStreamMute(audio_stream_type_t stream, bool* mute) { if (uint32_t(stream) >= AUDIO_STREAM_CNT) return BAD_VALUE; const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger(); @@ -156,9 +159,9 @@ status_t AudioSystem::getStreamMute(int stream, bool* mute) return NO_ERROR; } -status_t AudioSystem::setMode(int mode) +status_t AudioSystem::setMode(audio_mode_t mode) { - if (mode >= AUDIO_MODE_CNT) return BAD_VALUE; + if (uint32_t(mode) >= AUDIO_MODE_CNT) return BAD_VALUE; const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger(); if (af == 0) return PERMISSION_DENIED; return af->setMode(mode); @@ -203,7 +206,12 @@ int AudioSystem::logToLinear(float volume) return volume ? 
100 - int(dBConvertInverse * log(volume) + 0.5) : 0; } -status_t AudioSystem::getOutputSamplingRate(int* samplingRate, int streamType) +// DEPRECATED +status_t AudioSystem::getOutputSamplingRate(int* samplingRate, int streamType) { + return getOutputSamplingRate(samplingRate, (audio_stream_type_t)streamType); +} + +status_t AudioSystem::getOutputSamplingRate(int* samplingRate, audio_stream_type_t streamType) { OutputDescriptor *outputDesc; audio_io_handle_t output; @@ -212,14 +220,14 @@ status_t AudioSystem::getOutputSamplingRate(int* samplingRate, int streamType) streamType = AUDIO_STREAM_MUSIC; } - output = getOutput((audio_stream_type_t)streamType); + output = getOutput(streamType); if (output == 0) { return PERMISSION_DENIED; } gLock.lock(); outputDesc = AudioSystem::gOutputs.valueFor(output); - if (outputDesc == 0) { + if (outputDesc == NULL) { ALOGV("getOutputSamplingRate() no output descriptor for output %d in gOutputs", output); gLock.unlock(); const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger(); @@ -236,7 +244,12 @@ status_t AudioSystem::getOutputSamplingRate(int* samplingRate, int streamType) return NO_ERROR; } -status_t AudioSystem::getOutputFrameCount(int* frameCount, int streamType) +// DEPRECATED +status_t AudioSystem::getOutputFrameCount(int* frameCount, int streamType) { + return getOutputFrameCount(frameCount, (audio_stream_type_t)streamType); +} + +status_t AudioSystem::getOutputFrameCount(int* frameCount, audio_stream_type_t streamType) { OutputDescriptor *outputDesc; audio_io_handle_t output; @@ -245,14 +258,14 @@ status_t AudioSystem::getOutputFrameCount(int* frameCount, int streamType) streamType = AUDIO_STREAM_MUSIC; } - output = getOutput((audio_stream_type_t)streamType); + output = getOutput(streamType); if (output == 0) { return PERMISSION_DENIED; } gLock.lock(); outputDesc = AudioSystem::gOutputs.valueFor(output); - if (outputDesc == 0) { + if (outputDesc == NULL) { gLock.unlock(); const sp<IAudioFlinger>& af = 
AudioSystem::get_audio_flinger(); if (af == 0) return PERMISSION_DENIED; @@ -267,7 +280,7 @@ status_t AudioSystem::getOutputFrameCount(int* frameCount, int streamType) return NO_ERROR; } -status_t AudioSystem::getOutputLatency(uint32_t* latency, int streamType) +status_t AudioSystem::getOutputLatency(uint32_t* latency, audio_stream_type_t streamType) { OutputDescriptor *outputDesc; audio_io_handle_t output; @@ -276,14 +289,14 @@ status_t AudioSystem::getOutputLatency(uint32_t* latency, int streamType) streamType = AUDIO_STREAM_MUSIC; } - output = getOutput((audio_stream_type_t)streamType); + output = getOutput(streamType); if (output == 0) { return PERMISSION_DENIED; } gLock.lock(); outputDesc = AudioSystem::gOutputs.valueFor(output); - if (outputDesc == 0) { + if (outputDesc == NULL) { gLock.unlock(); const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger(); if (af == 0) return PERMISSION_DENIED; @@ -298,25 +311,30 @@ status_t AudioSystem::getOutputLatency(uint32_t* latency, int streamType) return NO_ERROR; } -status_t AudioSystem::getInputBufferSize(uint32_t sampleRate, int format, int channelCount, +status_t AudioSystem::getInputBufferSize(uint32_t sampleRate, audio_format_t format, int channelCount, size_t* buffSize) { + gLock.lock(); // Do we have a stale gInBufferSize or are we requesting the input buffer size for new values - if ((gInBuffSize == 0) || (sampleRate != gPrevInSamplingRate) || (format != gPrevInFormat) + size_t inBuffSize = gInBuffSize; + if ((inBuffSize == 0) || (sampleRate != gPrevInSamplingRate) || (format != gPrevInFormat) || (channelCount != gPrevInChannelCount)) { + gLock.unlock(); + const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger(); + if (af == 0) { + return PERMISSION_DENIED; + } + inBuffSize = af->getInputBufferSize(sampleRate, format, channelCount); + gLock.lock(); // save the request params gPrevInSamplingRate = sampleRate; gPrevInFormat = format; gPrevInChannelCount = channelCount; - gInBuffSize = 0; - const 
sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger(); - if (af == 0) { - return PERMISSION_DENIED; - } - gInBuffSize = af->getInputBufferSize(sampleRate, format, channelCount); + gInBuffSize = inBuffSize; } - *buffSize = gInBuffSize; + gLock.unlock(); + *buffSize = inBuffSize; return NO_ERROR; } @@ -328,7 +346,7 @@ status_t AudioSystem::setVoiceVolume(float value) return af->setVoiceVolume(value); } -status_t AudioSystem::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames, int stream) +status_t AudioSystem::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames, audio_stream_type_t stream) { const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger(); if (af == 0) return PERMISSION_DENIED; @@ -337,7 +355,7 @@ status_t AudioSystem::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames stream = AUDIO_STREAM_MUSIC; } - return af->getRenderPosition(halFrames, dspFrames, getOutput((audio_stream_type_t)stream)); + return af->getRenderPosition(halFrames, dspFrames, getOutput(stream)); } unsigned int AudioSystem::getInputFramesLost(audio_io_handle_t ioHandle) { @@ -386,10 +404,11 @@ void AudioSystem::AudioFlingerClient::binderDied(const wp<IBinder>& who) { ALOGW("AudioFlinger server died!"); } -void AudioSystem::AudioFlingerClient::ioConfigChanged(int event, int ioHandle, void *param2) { +void AudioSystem::AudioFlingerClient::ioConfigChanged(int event, audio_io_handle_t ioHandle, + const void *param2) { ALOGV("ioConfigChanged() event %d", event); - OutputDescriptor *desc; - uint32_t stream; + const OutputDescriptor *desc; + audio_stream_type_t stream; if (ioHandle == 0) return; @@ -397,8 +416,8 @@ void AudioSystem::AudioFlingerClient::ioConfigChanged(int event, int ioHandle, v switch (event) { case STREAM_CONFIG_CHANGED: - if (param2 == 0) break; - stream = *(uint32_t *)param2; + if (param2 == NULL) break; + stream = *(const audio_stream_type_t *)param2; ALOGV("ioConfigChanged() STREAM_CONFIG_CHANGED stream %d, output %d", stream, ioHandle); if 
(gStreamOutputMap.indexOfKey(stream) >= 0) { gStreamOutputMap.replaceValueFor(stream, ioHandle); @@ -409,8 +428,8 @@ void AudioSystem::AudioFlingerClient::ioConfigChanged(int event, int ioHandle, v ALOGV("ioConfigChanged() opening already existing output! %d", ioHandle); break; } - if (param2 == 0) break; - desc = (OutputDescriptor *)param2; + if (param2 == NULL) break; + desc = (const OutputDescriptor *)param2; OutputDescriptor *outputDesc = new OutputDescriptor(*desc); gOutputs.add(ioHandle, outputDesc); @@ -438,8 +457,8 @@ void AudioSystem::AudioFlingerClient::ioConfigChanged(int event, int ioHandle, v ALOGW("ioConfigChanged() modifying unknow output! %d", ioHandle); break; } - if (param2 == 0) break; - desc = (OutputDescriptor *)param2; + if (param2 == NULL) break; + desc = (const OutputDescriptor *)param2; ALOGV("ioConfigChanged() new config for output %d samplingRate %d, format %d channels %d frameCount %d latency %d", ioHandle, desc->samplingRate, desc->format, @@ -462,7 +481,7 @@ void AudioSystem::setErrorCallback(audio_error_callback cb) { gAudioErrorCallback = cb; } -bool AudioSystem::routedToA2dpOutput(int streamType) { +bool AudioSystem::routedToA2dpOutput(audio_stream_type_t streamType) { switch(streamType) { case AUDIO_STREAM_MUSIC: case AUDIO_STREAM_VOICE_CALL: @@ -484,7 +503,7 @@ sp<AudioSystem::AudioPolicyServiceClient> AudioSystem::gAudioPolicyServiceClient const sp<IAudioPolicyService>& AudioSystem::get_audio_policy_service() { gLock.lock(); - if (gAudioPolicyService.get() == 0) { + if (gAudioPolicyService == 0) { sp<IServiceManager> sm = defaultServiceManager(); sp<IBinder> binder; do { @@ -531,21 +550,15 @@ audio_policy_dev_state_t AudioSystem::getDeviceConnectionState(audio_devices_t d return aps->getDeviceConnectionState(device, device_address); } -status_t AudioSystem::setPhoneState(int state) +status_t AudioSystem::setPhoneState(audio_mode_t state) { + if (uint32_t(state) >= AUDIO_MODE_CNT) return BAD_VALUE; const sp<IAudioPolicyService>& 
aps = AudioSystem::get_audio_policy_service(); if (aps == 0) return PERMISSION_DENIED; return aps->setPhoneState(state); } -status_t AudioSystem::setRingerMode(uint32_t mode, uint32_t mask) -{ - const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service(); - if (aps == 0) return PERMISSION_DENIED; - return aps->setRingerMode(mode, mask); -} - status_t AudioSystem::setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config) { const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service(); @@ -563,7 +576,7 @@ audio_policy_forced_cfg_t AudioSystem::getForceUse(audio_policy_force_use_t usag audio_io_handle_t AudioSystem::getOutput(audio_stream_type_t stream, uint32_t samplingRate, - uint32_t format, + audio_format_t format, uint32_t channels, audio_policy_output_flags_t flags) { @@ -621,9 +634,9 @@ void AudioSystem::releaseOutput(audio_io_handle_t output) aps->releaseOutput(output); } -audio_io_handle_t AudioSystem::getInput(int inputSource, +audio_io_handle_t AudioSystem::getInput(audio_source_t inputSource, uint32_t samplingRate, - uint32_t format, + audio_format_t format, uint32_t channels, audio_in_acoustics_t acoustics, int sessionId) @@ -663,18 +676,22 @@ status_t AudioSystem::initStreamVolume(audio_stream_type_t stream, return aps->initStreamVolume(stream, indexMin, indexMax); } -status_t AudioSystem::setStreamVolumeIndex(audio_stream_type_t stream, int index) +status_t AudioSystem::setStreamVolumeIndex(audio_stream_type_t stream, + int index, + audio_devices_t device) { const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service(); if (aps == 0) return PERMISSION_DENIED; - return aps->setStreamVolumeIndex(stream, index); + return aps->setStreamVolumeIndex(stream, index, device); } -status_t AudioSystem::getStreamVolumeIndex(audio_stream_type_t stream, int *index) +status_t AudioSystem::getStreamVolumeIndex(audio_stream_type_t stream, + int *index, + audio_devices_t device) { const 
sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service(); if (aps == 0) return PERMISSION_DENIED; - return aps->getStreamVolumeIndex(stream, index); + return aps->getStreamVolumeIndex(stream, index, device); } uint32_t AudioSystem::getStrategyForStream(audio_stream_type_t stream) @@ -684,10 +701,10 @@ uint32_t AudioSystem::getStrategyForStream(audio_stream_type_t stream) return aps->getStrategyForStream(stream); } -uint32_t AudioSystem::getDevicesForStream(audio_stream_type_t stream) +audio_devices_t AudioSystem::getDevicesForStream(audio_stream_type_t stream) { const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service(); - if (aps == 0) return 0; + if (aps == 0) return (audio_devices_t)0; return aps->getDevicesForStream(stream); } @@ -723,7 +740,7 @@ status_t AudioSystem::setEffectEnabled(int id, bool enabled) return aps->setEffectEnabled(id, enabled); } -status_t AudioSystem::isStreamActive(int stream, bool* state, uint32_t inPastMs) +status_t AudioSystem::isStreamActive(audio_stream_type_t stream, bool* state, uint32_t inPastMs) { const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service(); if (aps == 0) return PERMISSION_DENIED; diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp index d51cd69..a1c99e5 100644 --- a/media/libmedia/AudioTrack.cpp +++ b/media/libmedia/AudioTrack.cpp @@ -1,4 +1,4 @@ -/* //device/extlibs/pv/android/AudioTrack.cpp +/* ** ** Copyright 2007, The Android Open Source Project ** @@ -38,12 +38,12 @@ #include <utils/Atomic.h> #include <cutils/bitops.h> +#include <cutils/compiler.h> #include <system/audio.h> #include <system/audio_policy.h> -#define LIKELY( exp ) (__builtin_expect( (exp) != 0, true )) -#define UNLIKELY( exp ) (__builtin_expect( (exp) != 0, false )) +#include <audio_utils/primitives.h> namespace android { // --------------------------------------------------------------------------- @@ -51,7 +51,7 @@ namespace android { // static status_t 
AudioTrack::getMinFrameCount( int* frameCount, - int streamType, + audio_stream_type_t streamType, uint32_t sampleRate) { int afSampleRate; @@ -79,14 +79,17 @@ status_t AudioTrack::getMinFrameCount( // --------------------------------------------------------------------------- AudioTrack::AudioTrack() - : mStatus(NO_INIT) + : mStatus(NO_INIT), + mIsTimed(false), + mPreviousPriority(ANDROID_PRIORITY_NORMAL), + mPreviousSchedulingGroup(ANDROID_TGROUP_DEFAULT) { } AudioTrack::AudioTrack( - int streamType, + audio_stream_type_t streamType, uint32_t sampleRate, - int format, + audio_format_t format, int channelMask, int frameCount, uint32_t flags, @@ -94,7 +97,10 @@ AudioTrack::AudioTrack( void* user, int notificationFrames, int sessionId) - : mStatus(NO_INIT) + : mStatus(NO_INIT), + mIsTimed(false), + mPreviousPriority(ANDROID_PRIORITY_NORMAL), + mPreviousSchedulingGroup(ANDROID_TGROUP_DEFAULT) { mStatus = set(streamType, sampleRate, format, channelMask, frameCount, flags, cbf, user, notificationFrames, @@ -106,13 +112,35 @@ AudioTrack::AudioTrack( uint32_t sampleRate, int format, int channelMask, + int frameCount, + uint32_t flags, + callback_t cbf, + void* user, + int notificationFrames, + int sessionId) + : mStatus(NO_INIT), + mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(ANDROID_TGROUP_DEFAULT) +{ + mStatus = set((audio_stream_type_t)streamType, sampleRate, (audio_format_t)format, channelMask, + frameCount, flags, cbf, user, notificationFrames, + 0, false, sessionId); +} + +AudioTrack::AudioTrack( + audio_stream_type_t streamType, + uint32_t sampleRate, + audio_format_t format, + int channelMask, const sp<IMemory>& sharedBuffer, uint32_t flags, callback_t cbf, void* user, int notificationFrames, int sessionId) - : mStatus(NO_INIT) + : mStatus(NO_INIT), + mIsTimed(false), + mPreviousPriority(ANDROID_PRIORITY_NORMAL), + mPreviousSchedulingGroup(ANDROID_TGROUP_DEFAULT) { mStatus = set(streamType, sampleRate, format, channelMask, 0, flags, cbf, 
user, notificationFrames, @@ -139,9 +167,9 @@ AudioTrack::~AudioTrack() } status_t AudioTrack::set( - int streamType, + audio_stream_type_t streamType, uint32_t sampleRate, - int format, + audio_format_t format, int channelMask, int frameCount, uint32_t flags, @@ -178,7 +206,7 @@ status_t AudioTrack::set( sampleRate = afSampleRate; } // these below should probably come from the audioFlinger too... - if (format == 0) { + if (format == AUDIO_FORMAT_DEFAULT) { format = AUDIO_FORMAT_PCM_16_BIT; } if (channelMask == 0) { @@ -203,8 +231,8 @@ status_t AudioTrack::set( uint32_t channelCount = popcount(channelMask); audio_io_handle_t output = AudioSystem::getOutput( - (audio_stream_type_t)streamType, - sampleRate,format, channelMask, + streamType, + sampleRate, format, channelMask, (audio_policy_output_flags_t)flags); if (output == 0) { @@ -214,7 +242,7 @@ status_t AudioTrack::set( mVolume[LEFT] = 1.0f; mVolume[RIGHT] = 1.0f; - mSendLevel = 0; + mSendLevel = 0.0f; mFrameCount = frameCount; mNotificationFramesReq = notificationFrames; mSessionId = sessionId; @@ -223,7 +251,7 @@ status_t AudioTrack::set( // create the IAudioTrack status_t status = createTrack_l(streamType, sampleRate, - (uint32_t)format, + format, (uint32_t)channelMask, frameCount, flags, @@ -235,23 +263,19 @@ status_t AudioTrack::set( return status; } - if (cbf != 0) { + if (cbf != NULL) { mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava); - if (mAudioTrackThread == 0) { - ALOGE("Could not create callback thread"); - return NO_INIT; - } } mStatus = NO_ERROR; mStreamType = streamType; - mFormat = (uint32_t)format; + mFormat = format; mChannelMask = (uint32_t)channelMask; mChannelCount = channelCount; mSharedBuffer = sharedBuffer; mMuted = false; - mActive = 0; + mActive = false; mCbf = cbf; mUserData = user; mLoopCount = 0; @@ -278,12 +302,12 @@ uint32_t AudioTrack::latency() const return mLatency; } -int AudioTrack::streamType() const +audio_stream_type_t AudioTrack::streamType() const { 
return mStreamType; } -int AudioTrack::format() const +audio_format_t AudioTrack::format() const { return mFormat; } @@ -298,7 +322,7 @@ uint32_t AudioTrack::frameCount() const return mCblk->frameCount; } -int AudioTrack::frameSize() const +size_t AudioTrack::frameSize() const { if (audio_is_linear_pcm(mFormat)) { return channelCount()*audio_bytes_per_sample(mFormat); @@ -327,7 +351,6 @@ void AudioTrack::start() return; } } - t->mLock.lock(); } AutoMutex lock(mLock); @@ -337,24 +360,34 @@ void AudioTrack::start() sp <IMemory> iMem = mCblkMemory; audio_track_cblk_t* cblk = mCblk; - if (mActive == 0) { + if (!mActive) { mFlushed = false; - mActive = 1; + mActive = true; mNewPosition = cblk->server + mUpdatePeriod; cblk->lock.lock(); cblk->bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS; cblk->waitTimeMs = 0; android_atomic_and(~CBLK_DISABLED_ON, &cblk->flags); + pid_t tid; if (t != 0) { - t->run("AudioTrackThread", ANDROID_PRIORITY_AUDIO); + t->run("AudioTrack", ANDROID_PRIORITY_AUDIO); + tid = t->getTid(); // pid_t is unknown until run() + ALOGV("getTid=%d", tid); + if (tid == -1) { + tid = 0; + } } else { - setpriority(PRIO_PROCESS, 0, ANDROID_PRIORITY_AUDIO); + mPreviousPriority = getpriority(PRIO_PROCESS, 0); + mPreviousSchedulingGroup = androidGetThreadSchedulingGroup(0); + androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO); + tid = 0; // not gettid() } ALOGV("start %p before lock cblk %p", this, mCblk); if (!(cblk->flags & CBLK_INVALID_MSK)) { cblk->lock.unlock(); - status = mAudioTrack->start(); + ALOGV("mAudioTrack->start(tid=%d)", tid); + status = mAudioTrack->start(tid); cblk->lock.lock(); if (status == DEAD_OBJECT) { android_atomic_or(CBLK_INVALID_ON, &cblk->flags); @@ -366,18 +399,16 @@ void AudioTrack::start() cblk->lock.unlock(); if (status != NO_ERROR) { ALOGV("start() failed"); - mActive = 0; + mActive = false; if (t != 0) { t->requestExit(); } else { - setpriority(PRIO_PROCESS, 0, ANDROID_PRIORITY_NORMAL); + setpriority(PRIO_PROCESS, 0, mPreviousPriority); 
+ androidSetThreadSchedulingGroup(0, mPreviousSchedulingGroup); } } } - if (t != 0) { - t->mLock.unlock(); - } } void AudioTrack::stop() @@ -385,13 +416,10 @@ void AudioTrack::stop() sp<AudioTrackThread> t = mAudioTrackThread; ALOGV("stop %p", this); - if (t != 0) { - t->mLock.lock(); - } AutoMutex lock(mLock); - if (mActive == 1) { - mActive = 0; + if (mActive) { + mActive = false; mCblk->cv.signal(); mAudioTrack->stop(); // Cancel loops (If we are in the middle of a loop, playback @@ -408,18 +436,17 @@ void AudioTrack::stop() if (t != 0) { t->requestExit(); } else { - setpriority(PRIO_PROCESS, 0, ANDROID_PRIORITY_NORMAL); + setpriority(PRIO_PROCESS, 0, mPreviousPriority); + androidSetThreadSchedulingGroup(0, mPreviousSchedulingGroup); } } - if (t != 0) { - t->mLock.unlock(); - } } bool AudioTrack::stopped() const { - return !mActive; + AutoMutex lock(mLock); + return stopped_l(); } void AudioTrack::flush() @@ -451,8 +478,8 @@ void AudioTrack::pause() { ALOGV("pause"); AutoMutex lock(mLock); - if (mActive == 1) { - mActive = 0; + if (mActive) { + mActive = false; mAudioTrack->pause(); } } @@ -470,7 +497,7 @@ bool AudioTrack::muted() const status_t AudioTrack::setVolume(float left, float right) { - if (left > 1.0f || right > 1.0f) { + if (left < 0.0f || left > 1.0f || right < 0.0f || right > 1.0f) { return BAD_VALUE; } @@ -478,13 +505,12 @@ status_t AudioTrack::setVolume(float left, float right) mVolume[LEFT] = left; mVolume[RIGHT] = right; - // write must be atomic - mCblk->volumeLR = (uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000); + mCblk->setVolumeLR((uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000)); return NO_ERROR; } -void AudioTrack::getVolume(float* left, float* right) +void AudioTrack::getVolume(float* left, float* right) const { if (left != NULL) { *left = mVolume[LEFT]; @@ -497,19 +523,19 @@ void AudioTrack::getVolume(float* left, float* right) status_t AudioTrack::setAuxEffectSendLevel(float level) { 
ALOGV("setAuxEffectSendLevel(%f)", level); - if (level > 1.0f) { + if (level < 0.0f || level > 1.0f) { return BAD_VALUE; } AutoMutex lock(mLock); mSendLevel = level; - mCblk->sendLevel = uint16_t(level * 0x1000); + mCblk->setSendLevel(level); return NO_ERROR; } -void AudioTrack::getAuxEffectSendLevel(float* level) +void AudioTrack::getAuxEffectSendLevel(float* level) const { if (level != NULL) { *level = mSendLevel; @@ -520,6 +546,10 @@ status_t AudioTrack::setSampleRate(int rate) { int afSamplingRate; + if (mIsTimed) { + return INVALID_OPERATION; + } + if (AudioSystem::getOutputSamplingRate(&afSamplingRate, mStreamType) != NO_ERROR) { return NO_INIT; } @@ -531,8 +561,12 @@ status_t AudioTrack::setSampleRate(int rate) return NO_ERROR; } -uint32_t AudioTrack::getSampleRate() +uint32_t AudioTrack::getSampleRate() const { + if (mIsTimed) { + return INVALID_OPERATION; + } + AutoMutex lock(mLock); return mCblk->sampleRate; } @@ -558,6 +592,10 @@ status_t AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCou return NO_ERROR; } + if (mIsTimed) { + return INVALID_OPERATION; + } + if (loopStart >= loopEnd || loopEnd - loopStart > cblk->frameCount || cblk->server > loopStart) { @@ -579,29 +617,9 @@ status_t AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCou return NO_ERROR; } -status_t AudioTrack::getLoop(uint32_t *loopStart, uint32_t *loopEnd, int *loopCount) -{ - AutoMutex lock(mLock); - if (loopStart != 0) { - *loopStart = mCblk->loopStart; - } - if (loopEnd != 0) { - *loopEnd = mCblk->loopEnd; - } - if (loopCount != 0) { - if (mCblk->loopCount < 0) { - *loopCount = -1; - } else { - *loopCount = mCblk->loopCount; - } - } - - return NO_ERROR; -} - status_t AudioTrack::setMarkerPosition(uint32_t marker) { - if (mCbf == 0) return INVALID_OPERATION; + if (mCbf == NULL) return INVALID_OPERATION; mMarkerPosition = marker; mMarkerReached = false; @@ -609,9 +627,9 @@ status_t AudioTrack::setMarkerPosition(uint32_t marker) return NO_ERROR; } 
-status_t AudioTrack::getMarkerPosition(uint32_t *marker) +status_t AudioTrack::getMarkerPosition(uint32_t *marker) const { - if (marker == 0) return BAD_VALUE; + if (marker == NULL) return BAD_VALUE; *marker = mMarkerPosition; @@ -620,7 +638,7 @@ status_t AudioTrack::getMarkerPosition(uint32_t *marker) status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod) { - if (mCbf == 0) return INVALID_OPERATION; + if (mCbf == NULL) return INVALID_OPERATION; uint32_t curPosition; getPosition(&curPosition); @@ -630,9 +648,9 @@ status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod) return NO_ERROR; } -status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) +status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const { - if (updatePeriod == 0) return BAD_VALUE; + if (updatePeriod == NULL) return BAD_VALUE; *updatePeriod = mUpdatePeriod; @@ -641,10 +659,13 @@ status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) status_t AudioTrack::setPosition(uint32_t position) { + if (mIsTimed) return INVALID_OPERATION; + AutoMutex lock(mLock); - Mutex::Autolock _l(mCblk->lock); - if (!stopped()) return INVALID_OPERATION; + if (!stopped_l()) return INVALID_OPERATION; + + Mutex::Autolock _l(mCblk->lock); if (position > mCblk->user) return BAD_VALUE; @@ -656,7 +677,7 @@ status_t AudioTrack::setPosition(uint32_t position) status_t AudioTrack::getPosition(uint32_t *position) { - if (position == 0) return BAD_VALUE; + if (position == NULL) return BAD_VALUE; AutoMutex lock(mLock); *position = mFlushed ? 
0 : mCblk->server; @@ -667,7 +688,7 @@ status_t AudioTrack::reload() { AutoMutex lock(mLock); - if (!stopped()) return INVALID_OPERATION; + if (!stopped_l()) return INVALID_OPERATION; flush_l(); @@ -685,11 +706,11 @@ audio_io_handle_t AudioTrack::getOutput() // must be called with mLock held audio_io_handle_t AudioTrack::getOutput_l() { - return AudioSystem::getOutput((audio_stream_type_t)mStreamType, + return AudioSystem::getOutput(mStreamType, mCblk->sampleRate, mFormat, mChannelMask, (audio_policy_output_flags_t)mFlags); } -int AudioTrack::getSessionId() +int AudioTrack::getSessionId() const { return mSessionId; } @@ -708,9 +729,9 @@ status_t AudioTrack::attachAuxEffect(int effectId) // must be called with mLock held status_t AudioTrack::createTrack_l( - int streamType, + audio_stream_type_t streamType, uint32_t sampleRate, - uint32_t format, + audio_format_t format, uint32_t channelMask, int frameCount, uint32_t flags, @@ -763,15 +784,12 @@ status_t AudioTrack::createTrack_l( mNotificationFramesAct = frameCount/2; } if (frameCount < minFrameCount) { - if (enforceFrameCount) { - ALOGE("Invalid buffer size: minFrameCount %d, frameCount %d", minFrameCount, frameCount); - return BAD_VALUE; - } else { - frameCount = minFrameCount; - } + ALOGW_IF(enforceFrameCount, "Minimum buffer size corrected from %d to %d", + frameCount, minFrameCount); + frameCount = minFrameCount; } } else { - // Ensure that buffer alignment matches channelcount + // Ensure that buffer alignment matches channelCount int channelCount = popcount(channelMask); if (((uint32_t)sharedBuffer->pointer() & (channelCount | 1)) != 0) { ALOGE("Invalid buffer alignement: address %p, channelCount %d", sharedBuffer->pointer(), channelCount); @@ -790,6 +808,7 @@ status_t AudioTrack::createTrack_l( ((uint16_t)flags) << 16, sharedBuffer, output, + mIsTimed, &mSessionId, &status); @@ -802,9 +821,7 @@ status_t AudioTrack::createTrack_l( ALOGE("Could not get control block"); return NO_INIT; } - mAudioTrack.clear(); 
mAudioTrack = track; - mCblkMemory.clear(); mCblkMemory = cblk; mCblk = static_cast<audio_track_cblk_t*>(cblk->pointer()); android_atomic_or(CBLK_DIRECTION_OUT, &mCblk->flags); @@ -816,8 +833,8 @@ status_t AudioTrack::createTrack_l( mCblk->stepUser(mCblk->frameCount); } - mCblk->volumeLR = (uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) | uint16_t(mVolume[LEFT] * 0x1000); - mCblk->sendLevel = uint16_t(mSendLevel * 0x1000); + mCblk->setVolumeLR((uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) | uint16_t(mVolume[LEFT] * 0x1000)); + mCblk->setSendLevel(mSendLevel); mAudioTrack->attachAuxEffect(mAuxEffectId); mCblk->bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS; mCblk->waitTimeMs = 0; @@ -829,7 +846,7 @@ status_t AudioTrack::createTrack_l( status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount) { AutoMutex lock(mLock); - int active; + bool active; status_t result = NO_ERROR; audio_track_cblk_t* cblk = mCblk; uint32_t framesReq = audioBuffer->frameCount; @@ -851,12 +868,12 @@ status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount) goto start_loop_here; while (framesAvail == 0) { active = mActive; - if (UNLIKELY(!active)) { + if (CC_UNLIKELY(!active)) { ALOGV("Not active and NO_MORE_BUFFERS"); cblk->lock.unlock(); return NO_MORE_BUFFERS; } - if (UNLIKELY(!waitCount)) { + if (CC_UNLIKELY(!waitCount)) { cblk->lock.unlock(); return WOULD_BLOCK; } @@ -865,7 +882,7 @@ status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount) result = cblk->cv.waitRelative(cblk->lock, milliseconds(waitTimeMs)); cblk->lock.unlock(); mLock.lock(); - if (mActive == 0) { + if (!mActive) { return status_t(STOPPED); } cblk->lock.lock(); @@ -874,7 +891,7 @@ status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount) if (cblk->flags & CBLK_INVALID_MSK) { goto create_new_track; } - if (__builtin_expect(result!=NO_ERROR, false)) { + if (CC_UNLIKELY(result != NO_ERROR)) { cblk->waitTimeMs += waitTimeMs; if (cblk->waitTimeMs >= 
cblk->bufferTimeoutMs) { // timing out when a loop has been set and we have already written upto loop end @@ -884,7 +901,7 @@ status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount) "user=%08x, server=%08x", this, cblk->user, cblk->server); //unlock cblk mutex before calling mAudioTrack->start() (see issue #1617140) cblk->lock.unlock(); - result = mAudioTrack->start(); + result = mAudioTrack->start(0); // callback thread hasn't changed cblk->lock.lock(); if (result == DEAD_OBJECT) { android_atomic_or(CBLK_INVALID_ON, &cblk->flags); @@ -916,7 +933,7 @@ create_new_track: if (mActive && (cblk->flags & CBLK_DISABLED_MSK)) { android_atomic_and(~CBLK_DISABLED_ON, &cblk->flags); ALOGW("obtainBuffer() track %p disabled, restarting", this); - mAudioTrack->start(); + mAudioTrack->start(0); // callback thread hasn't changed } cblk->waitTimeMs = 0; @@ -958,9 +975,11 @@ ssize_t AudioTrack::write(const void* buffer, size_t userSize) { if (mSharedBuffer != 0) return INVALID_OPERATION; + if (mIsTimed) return INVALID_OPERATION; if (ssize_t(userSize) < 0) { - // sanity-check. user is most-likely passing an error code. + // Sanity-check: user is most-likely passing an error code, and it would + // make the return value ambiguous (actualSize vs error). ALOGE("AudioTrack::write(buffer=%p, size=%u (%d)", buffer, userSize, userSize); return BAD_VALUE; @@ -978,13 +997,11 @@ ssize_t AudioTrack::write(const void* buffer, size_t userSize) ssize_t written = 0; const int8_t *src = (const int8_t *)buffer; Buffer audioBuffer; - size_t frameSz = (size_t)frameSize(); + size_t frameSz = frameSize(); do { audioBuffer.frameCount = userSize/frameSz; - // Calling obtainBuffer() with a negative wait count causes - // an (almost) infinite wait time. 
status_t err = obtainBuffer(&audioBuffer, -1); if (err < 0) { // out of buffers, return #bytes written @@ -998,12 +1015,7 @@ ssize_t AudioTrack::write(const void* buffer, size_t userSize) if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_POLICY_OUTPUT_FLAG_DIRECT)) { // Divide capacity by 2 to take expansion into account toWrite = audioBuffer.size>>1; - // 8 to 16 bit conversion - int count = toWrite; - int16_t *dst = (int16_t *)(audioBuffer.i8); - while(count--) { - *dst++ = (int16_t)(*src++^0x80) << 8; - } + memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) src, toWrite); } else { toWrite = audioBuffer.size; memcpy(audioBuffer.i8, src, toWrite); @@ -1020,6 +1032,59 @@ ssize_t AudioTrack::write(const void* buffer, size_t userSize) // ------------------------------------------------------------------------- +TimedAudioTrack::TimedAudioTrack() { + mIsTimed = true; +} + +status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer) +{ + status_t result = UNKNOWN_ERROR; + + // If the track is not invalid already, try to allocate a buffer. alloc + // fails indicating that the server is dead, flag the track as invalid so + // we can attempt to restore in in just a bit. + if (!(mCblk->flags & CBLK_INVALID_MSK)) { + result = mAudioTrack->allocateTimedBuffer(size, buffer); + if (result == DEAD_OBJECT) { + android_atomic_or(CBLK_INVALID_ON, &mCblk->flags); + } + } + + // If the track is invalid at this point, attempt to restore it. and try the + // allocation one more time. 
+ if (mCblk->flags & CBLK_INVALID_MSK) { + mCblk->lock.lock(); + result = restoreTrack_l(mCblk, false); + mCblk->lock.unlock(); + + if (result == OK) + result = mAudioTrack->allocateTimedBuffer(size, buffer); + } + + return result; +} + +status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer, + int64_t pts) +{ + // restart track if it was disabled by audioflinger due to previous underrun + if (mActive && (mCblk->flags & CBLK_DISABLED_MSK)) { + android_atomic_and(~CBLK_DISABLED_ON, &mCblk->flags); + ALOGW("queueTimedBuffer() track %p disabled, restarting", this); + mAudioTrack->start(0); + } + + return mAudioTrack->queueTimedBuffer(buffer, pts); +} + +status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform, + TargetTimeline target) +{ + return mAudioTrack->setMediaTimeTransform(xform, target); +} + +// ------------------------------------------------------------------------- + bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread) { Buffer audioBuffer; @@ -1032,10 +1097,11 @@ bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread) sp <IAudioTrack> audioTrack = mAudioTrack; sp <IMemory> iMem = mCblkMemory; audio_track_cblk_t* cblk = mCblk; + bool active = mActive; mLock.unlock(); // Manage underrun callback - if (mActive && (cblk->framesAvailable() == cblk->frameCount)) { + if (active && (cblk->framesAvailable() == cblk->frameCount)) { ALOGV("Underrun user: %x, server: %x, flags %04x", cblk->user, cblk->server, cblk->flags); if (!(android_atomic_or(CBLK_UNDERRUN_ON, &cblk->flags) & CBLK_UNDERRUN_MSK)) { mCbf(EVENT_UNDERRUN, mUserData, 0); @@ -1078,6 +1144,9 @@ bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread) frames = mRemainingFrames; } + // See description of waitCount parameter at declaration of obtainBuffer(). + // The logic below prevents us from being stuck below at obtainBuffer() + // not being able to handle timed events (position, markers, loops). 
int32_t waitCount = -1; if (mUpdatePeriod || (!mMarkerReached && mMarkerPosition) || mLoopCount) { waitCount = 1; @@ -1087,9 +1156,6 @@ bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread) audioBuffer.frameCount = frames; - // Calling obtainBuffer() with a wait count of 1 - // limits wait time to WAIT_PERIOD_MS. This prevents from being - // stuck here not being able to handle timed events (position, markers, loops). status_t err = obtainBuffer(&audioBuffer, waitCount); if (err < NO_ERROR) { if (err != TIMED_OUT) { @@ -1123,19 +1189,14 @@ bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread) if (writtenSize > reqSize) writtenSize = reqSize; if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_POLICY_OUTPUT_FLAG_DIRECT)) { - // 8 to 16 bit conversion - const int8_t *src = audioBuffer.i8 + writtenSize-1; - int count = writtenSize; - int16_t *dst = audioBuffer.i16 + writtenSize-1; - while(count--) { - *dst-- = (int16_t)(*src--^0x80) << 8; - } + // 8 to 16 bit conversion, note that source and destination are the same address + memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) audioBuffer.i8, writtenSize); writtenSize <<= 1; } audioBuffer.size = writtenSize; // NOTE: mCblk->frameSize is not equal to AudioTrack::frameSize() for - // 8 bit PCM data: in this case, mCblk->frameSize is based on a sampel size of + // 8 bit PCM data: in this case, mCblk->frameSize is based on a sample size of // 16 bit. 
audioBuffer.frameCount = writtenSize/mCblk->frameSize; @@ -1216,7 +1277,7 @@ status_t AudioTrack::restoreTrack_l(audio_track_cblk_t*& cblk, bool fromStart) } } if (mActive) { - result = mAudioTrack->start(); + result = mAudioTrack->start(0); // callback thread hasn't changed ALOGW_IF(result != NO_ERROR, "restoreTrack_l() start() failed status %d", result); } if (fromStart && result == NO_ERROR) { @@ -1307,15 +1368,15 @@ void AudioTrack::AudioTrackThread::onFirstRef() audio_track_cblk_t::audio_track_cblk_t() : lock(Mutex::SHARED), cv(Condition::SHARED), user(0), server(0), - userBase(0), serverBase(0), buffers(0), frameCount(0), - loopStart(UINT_MAX), loopEnd(UINT_MAX), loopCount(0), volumeLR(0), - sendLevel(0), flags(0) + userBase(0), serverBase(0), buffers(NULL), frameCount(0), + loopStart(UINT_MAX), loopEnd(UINT_MAX), loopCount(0), mVolumeLR(0x10001000), + mSendLevel(0), flags(0) { } uint32_t audio_track_cblk_t::stepUser(uint32_t frameCount) { - uint32_t u = this->user; + uint32_t u = user; u += frameCount; // Ensure that user is never ahead of server for AudioRecord @@ -1324,16 +1385,16 @@ uint32_t audio_track_cblk_t::stepUser(uint32_t frameCount) if (bufferTimeoutMs == MAX_STARTUP_TIMEOUT_MS-1) { bufferTimeoutMs = MAX_RUN_TIMEOUT_MS; } - } else if (u > this->server) { - ALOGW("stepServer occured after track reset"); - u = this->server; + } else if (u > server) { + ALOGW("stepServer occurred after track reset"); + u = server; } if (u >= userBase + this->frameCount) { userBase += this->frameCount; } - this->user = u; + user = u; // Clear flow control error condition as new data has been written/read to/from buffer. 
if (flags & CBLK_UNDERRUN_MSK) { @@ -1350,7 +1411,7 @@ bool audio_track_cblk_t::stepServer(uint32_t frameCount) return false; } - uint32_t s = this->server; + uint32_t s = server; s += frameCount; if (flags & CBLK_DIRECTION_MSK) { @@ -1363,9 +1424,9 @@ bool audio_track_cblk_t::stepServer(uint32_t frameCount) // while the mixer is processing a block: in this case, // stepServer() is called After the flush() has reset u & s and // we have s > u - if (s > this->user) { - ALOGW("stepServer occured after track reset"); - s = this->user; + if (s > user) { + ALOGW("stepServer occurred after track reset"); + s = user; } } @@ -1381,7 +1442,7 @@ bool audio_track_cblk_t::stepServer(uint32_t frameCount) serverBase += this->frameCount; } - this->server = s; + server = s; if (!(flags & CBLK_INVALID_MSK)) { cv.signal(); @@ -1392,7 +1453,7 @@ bool audio_track_cblk_t::stepServer(uint32_t frameCount) void* audio_track_cblk_t::buffer(uint32_t offset) const { - return (int8_t *)this->buffers + (offset - userBase) * this->frameSize; + return (int8_t *)buffers + (offset - userBase) * frameSize; } uint32_t audio_track_cblk_t::framesAvailable() @@ -1403,8 +1464,8 @@ uint32_t audio_track_cblk_t::framesAvailable() uint32_t audio_track_cblk_t::framesAvailable_l() { - uint32_t u = this->user; - uint32_t s = this->server; + uint32_t u = user; + uint32_t s = server; if (flags & CBLK_DIRECTION_MSK) { uint32_t limit = (s < loopStart) ? 
s : loopStart; @@ -1416,8 +1477,8 @@ uint32_t audio_track_cblk_t::framesAvailable_l() uint32_t audio_track_cblk_t::framesReady() { - uint32_t u = this->user; - uint32_t s = this->server; + uint32_t u = user; + uint32_t s = server; if (flags & CBLK_DIRECTION_MSK) { if (u < loopEnd) { @@ -1462,4 +1523,3 @@ bool audio_track_cblk_t::tryLock() // ------------------------------------------------------------------------- }; // namespace android - diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp index abd491f..ebadbfa 100644 --- a/media/libmedia/IAudioFlinger.cpp +++ b/media/libmedia/IAudioFlinger.cpp @@ -82,14 +82,15 @@ public: virtual sp<IAudioTrack> createTrack( pid_t pid, - int streamType, + audio_stream_type_t streamType, uint32_t sampleRate, - uint32_t format, + audio_format_t format, uint32_t channelMask, int frameCount, uint32_t flags, const sp<IMemory>& sharedBuffer, - int output, + audio_io_handle_t output, + bool isTimed, int *sessionId, status_t *status) { @@ -97,14 +98,15 @@ public: sp<IAudioTrack> track; data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); data.writeInt32(pid); - data.writeInt32(streamType); + data.writeInt32((int32_t) streamType); data.writeInt32(sampleRate); data.writeInt32(format); data.writeInt32(channelMask); data.writeInt32(frameCount); data.writeInt32(flags); data.writeStrongBinder(sharedBuffer->asBinder()); - data.writeInt32(output); + data.writeInt32((int32_t) output); + data.writeInt32(isTimed); int lSessionId = 0; if (sessionId != NULL) { lSessionId = *sessionId; @@ -129,9 +131,9 @@ public: virtual sp<IAudioRecord> openRecord( pid_t pid, - int input, + audio_io_handle_t input, uint32_t sampleRate, - uint32_t format, + audio_format_t format, uint32_t channelMask, int frameCount, uint32_t flags, @@ -142,7 +144,7 @@ public: sp<IAudioRecord> record; data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); data.writeInt32(pid); - data.writeInt32(input); + data.writeInt32((int32_t) 
input); data.writeInt32(sampleRate); data.writeInt32(format); data.writeInt32(channelMask); @@ -170,47 +172,47 @@ public: return record; } - virtual uint32_t sampleRate(int output) const + virtual uint32_t sampleRate(audio_io_handle_t output) const { Parcel data, reply; data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); - data.writeInt32(output); + data.writeInt32((int32_t) output); remote()->transact(SAMPLE_RATE, data, &reply); return reply.readInt32(); } - virtual int channelCount(int output) const + virtual int channelCount(audio_io_handle_t output) const { Parcel data, reply; data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); - data.writeInt32(output); + data.writeInt32((int32_t) output); remote()->transact(CHANNEL_COUNT, data, &reply); return reply.readInt32(); } - virtual uint32_t format(int output) const + virtual audio_format_t format(audio_io_handle_t output) const { Parcel data, reply; data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); - data.writeInt32(output); + data.writeInt32((int32_t) output); remote()->transact(FORMAT, data, &reply); - return reply.readInt32(); + return (audio_format_t) reply.readInt32(); } - virtual size_t frameCount(int output) const + virtual size_t frameCount(audio_io_handle_t output) const { Parcel data, reply; data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); - data.writeInt32(output); + data.writeInt32((int32_t) output); remote()->transact(FRAME_COUNT, data, &reply); return reply.readInt32(); } - virtual uint32_t latency(int output) const + virtual uint32_t latency(audio_io_handle_t output) const { Parcel data, reply; data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); - data.writeInt32(output); + data.writeInt32((int32_t) output); remote()->transact(LATENCY, data, &reply); return reply.readInt32(); } @@ -249,47 +251,48 @@ public: return reply.readInt32(); } - virtual status_t setStreamVolume(int stream, float value, int output) + virtual status_t 
setStreamVolume(audio_stream_type_t stream, float value, + audio_io_handle_t output) { Parcel data, reply; data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); - data.writeInt32(stream); + data.writeInt32((int32_t) stream); data.writeFloat(value); - data.writeInt32(output); + data.writeInt32((int32_t) output); remote()->transact(SET_STREAM_VOLUME, data, &reply); return reply.readInt32(); } - virtual status_t setStreamMute(int stream, bool muted) + virtual status_t setStreamMute(audio_stream_type_t stream, bool muted) { Parcel data, reply; data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); - data.writeInt32(stream); + data.writeInt32((int32_t) stream); data.writeInt32(muted); remote()->transact(SET_STREAM_MUTE, data, &reply); return reply.readInt32(); } - virtual float streamVolume(int stream, int output) const + virtual float streamVolume(audio_stream_type_t stream, audio_io_handle_t output) const { Parcel data, reply; data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); - data.writeInt32(stream); - data.writeInt32(output); + data.writeInt32((int32_t) stream); + data.writeInt32((int32_t) output); remote()->transact(STREAM_VOLUME, data, &reply); return reply.readFloat(); } - virtual bool streamMute(int stream) const + virtual bool streamMute(audio_stream_type_t stream) const { Parcel data, reply; data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); - data.writeInt32(stream); + data.writeInt32((int32_t) stream); remote()->transact(STREAM_MUTE, data, &reply); return reply.readInt32(); } - virtual status_t setMode(int mode) + virtual status_t setMode(audio_mode_t mode) { Parcel data, reply; data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); @@ -315,21 +318,21 @@ public: return reply.readInt32(); } - virtual status_t setParameters(int ioHandle, const String8& keyValuePairs) + virtual status_t setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs) { Parcel data, reply; 
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); - data.writeInt32(ioHandle); + data.writeInt32((int32_t) ioHandle); data.writeString8(keyValuePairs); remote()->transact(SET_PARAMETERS, data, &reply); return reply.readInt32(); } - virtual String8 getParameters(int ioHandle, const String8& keys) + virtual String8 getParameters(audio_io_handle_t ioHandle, const String8& keys) const { Parcel data, reply; data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); - data.writeInt32(ioHandle); + data.writeInt32((int32_t) ioHandle); data.writeString8(keys); remote()->transact(GET_PARAMETERS, data, &reply); return reply.readString8(); @@ -343,7 +346,7 @@ public: remote()->transact(REGISTER_CLIENT, data, &reply); } - virtual size_t getInputBufferSize(uint32_t sampleRate, int format, int channelCount) + virtual size_t getInputBufferSize(uint32_t sampleRate, audio_format_t format, int channelCount) const { Parcel data, reply; data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); @@ -354,9 +357,9 @@ public: return reply.readInt32(); } - virtual int openOutput(uint32_t *pDevices, + virtual audio_io_handle_t openOutput(uint32_t *pDevices, uint32_t *pSamplingRate, - uint32_t *pFormat, + audio_format_t *pFormat, uint32_t *pChannels, uint32_t *pLatencyMs, uint32_t flags) @@ -364,7 +367,7 @@ public: Parcel data, reply; uint32_t devices = pDevices ? *pDevices : 0; uint32_t samplingRate = pSamplingRate ? *pSamplingRate : 0; - uint32_t format = pFormat ? *pFormat : 0; + audio_format_t format = pFormat ? *pFormat : AUDIO_FORMAT_DEFAULT; uint32_t channels = pChannels ? *pChannels : 0; uint32_t latency = pLatencyMs ? 
*pLatencyMs : 0; @@ -376,13 +379,13 @@ public: data.writeInt32(latency); data.writeInt32(flags); remote()->transact(OPEN_OUTPUT, data, &reply); - int output = reply.readInt32(); - ALOGV("openOutput() returned output, %p", output); + audio_io_handle_t output = (audio_io_handle_t) reply.readInt32(); + ALOGV("openOutput() returned output, %d", output); devices = reply.readInt32(); if (pDevices) *pDevices = devices; samplingRate = reply.readInt32(); if (pSamplingRate) *pSamplingRate = samplingRate; - format = reply.readInt32(); + format = (audio_format_t) reply.readInt32(); if (pFormat) *pFormat = format; channels = reply.readInt32(); if (pChannels) *pChannels = channels; @@ -391,53 +394,54 @@ public: return output; } - virtual int openDuplicateOutput(int output1, int output2) + virtual audio_io_handle_t openDuplicateOutput(audio_io_handle_t output1, + audio_io_handle_t output2) { Parcel data, reply; data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); - data.writeInt32(output1); - data.writeInt32(output2); + data.writeInt32((int32_t) output1); + data.writeInt32((int32_t) output2); remote()->transact(OPEN_DUPLICATE_OUTPUT, data, &reply); - return reply.readInt32(); + return (audio_io_handle_t) reply.readInt32(); } - virtual status_t closeOutput(int output) + virtual status_t closeOutput(audio_io_handle_t output) { Parcel data, reply; data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); - data.writeInt32(output); + data.writeInt32((int32_t) output); remote()->transact(CLOSE_OUTPUT, data, &reply); return reply.readInt32(); } - virtual status_t suspendOutput(int output) + virtual status_t suspendOutput(audio_io_handle_t output) { Parcel data, reply; data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); - data.writeInt32(output); + data.writeInt32((int32_t) output); remote()->transact(SUSPEND_OUTPUT, data, &reply); return reply.readInt32(); } - virtual status_t restoreOutput(int output) + virtual status_t 
restoreOutput(audio_io_handle_t output) { Parcel data, reply; data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); - data.writeInt32(output); + data.writeInt32((int32_t) output); remote()->transact(RESTORE_OUTPUT, data, &reply); return reply.readInt32(); } - virtual int openInput(uint32_t *pDevices, + virtual audio_io_handle_t openInput(uint32_t *pDevices, uint32_t *pSamplingRate, - uint32_t *pFormat, + audio_format_t *pFormat, uint32_t *pChannels, - uint32_t acoustics) + audio_in_acoustics_t acoustics) { Parcel data, reply; uint32_t devices = pDevices ? *pDevices : 0; uint32_t samplingRate = pSamplingRate ? *pSamplingRate : 0; - uint32_t format = pFormat ? *pFormat : 0; + audio_format_t format = pFormat ? *pFormat : AUDIO_FORMAT_DEFAULT; uint32_t channels = pChannels ? *pChannels : 0; data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); @@ -445,14 +449,14 @@ public: data.writeInt32(samplingRate); data.writeInt32(format); data.writeInt32(channels); - data.writeInt32(acoustics); + data.writeInt32((int32_t) acoustics); remote()->transact(OPEN_INPUT, data, &reply); - int input = reply.readInt32(); + audio_io_handle_t input = (audio_io_handle_t) reply.readInt32(); devices = reply.readInt32(); if (pDevices) *pDevices = devices; samplingRate = reply.readInt32(); if (pSamplingRate) *pSamplingRate = samplingRate; - format = reply.readInt32(); + format = (audio_format_t) reply.readInt32(); if (pFormat) *pFormat = format; channels = reply.readInt32(); if (pChannels) *pChannels = channels; @@ -468,12 +472,12 @@ public: return reply.readInt32(); } - virtual status_t setStreamOutput(uint32_t stream, int output) + virtual status_t setStreamOutput(audio_stream_type_t stream, audio_io_handle_t output) { Parcel data, reply; data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); - data.writeInt32(stream); - data.writeInt32(output); + data.writeInt32((int32_t) stream); + data.writeInt32((int32_t) output); remote()->transact(SET_STREAM_OUTPUT, 
data, &reply); return reply.readInt32(); } @@ -487,11 +491,12 @@ public: return reply.readInt32(); } - virtual status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames, int output) + virtual status_t getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames, + audio_io_handle_t output) const { Parcel data, reply; data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); - data.writeInt32(output); + data.writeInt32((int32_t) output); remote()->transact(GET_RENDER_POSITION, data, &reply); status_t status = reply.readInt32(); if (status == NO_ERROR) { @@ -507,11 +512,11 @@ public: return status; } - virtual unsigned int getInputFramesLost(int ioHandle) + virtual unsigned int getInputFramesLost(audio_io_handle_t ioHandle) const { Parcel data, reply; data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); - data.writeInt32(ioHandle); + data.writeInt32((int32_t) ioHandle); remote()->transact(GET_INPUT_FRAMES_LOST, data, &reply); return reply.readInt32(); } @@ -544,7 +549,7 @@ public: remote()->transact(RELEASE_AUDIO_SESSION_ID, data, &reply); } - virtual status_t queryNumberEffects(uint32_t *numEffects) + virtual status_t queryNumberEffects(uint32_t *numEffects) const { Parcel data, reply; data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); @@ -556,13 +561,13 @@ public: if (status != NO_ERROR) { return status; } - if (numEffects) { + if (numEffects != NULL) { *numEffects = (uint32_t)reply.readInt32(); } return NO_ERROR; } - virtual status_t queryEffect(uint32_t index, effect_descriptor_t *pDescriptor) + virtual status_t queryEffect(uint32_t index, effect_descriptor_t *pDescriptor) const { if (pDescriptor == NULL) { return BAD_VALUE; @@ -582,7 +587,8 @@ public: return NO_ERROR; } - virtual status_t getEffectDescriptor(effect_uuid_t *pUuid, effect_descriptor_t *pDescriptor) + virtual status_t getEffectDescriptor(const effect_uuid_t *pUuid, + effect_descriptor_t *pDescriptor) const { if (pUuid == NULL || pDescriptor == NULL) { 
return BAD_VALUE; @@ -606,7 +612,7 @@ public: effect_descriptor_t *pDesc, const sp<IEffectClient>& client, int32_t priority, - int output, + audio_io_handle_t output, int sessionId, status_t *status, int *id, @@ -627,7 +633,7 @@ public: data.write(pDesc, sizeof(effect_descriptor_t)); data.writeStrongBinder(client->asBinder()); data.writeInt32(priority); - data.writeInt32(output); + data.writeInt32((int32_t) output); data.writeInt32(sessionId); status_t lStatus = remote()->transact(CREATE_EFFECT, data, &reply); @@ -640,7 +646,7 @@ public: *id = tmp; } tmp = reply.readInt32(); - if (enabled) { + if (enabled != NULL) { *enabled = tmp; } effect = interface_cast<IEffect>(reply.readStrongBinder()); @@ -653,13 +659,14 @@ public: return effect; } - virtual status_t moveEffects(int session, int srcOutput, int dstOutput) + virtual status_t moveEffects(int session, audio_io_handle_t srcOutput, + audio_io_handle_t dstOutput) { Parcel data, reply; data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor()); data.writeInt32(session); - data.writeInt32(srcOutput); - data.writeInt32(dstOutput); + data.writeInt32((int32_t) srcOutput); + data.writeInt32((int32_t) dstOutput); remote()->transact(MOVE_EFFECTS, data, &reply); return reply.readInt32(); } @@ -678,17 +685,18 @@ status_t BnAudioFlinger::onTransact( pid_t pid = data.readInt32(); int streamType = data.readInt32(); uint32_t sampleRate = data.readInt32(); - int format = data.readInt32(); + audio_format_t format = (audio_format_t) data.readInt32(); int channelCount = data.readInt32(); size_t bufferCount = data.readInt32(); uint32_t flags = data.readInt32(); sp<IMemory> buffer = interface_cast<IMemory>(data.readStrongBinder()); - int output = data.readInt32(); + audio_io_handle_t output = (audio_io_handle_t) data.readInt32(); + bool isTimed = data.readInt32(); int sessionId = data.readInt32(); status_t status; sp<IAudioTrack> track = createTrack(pid, - streamType, sampleRate, format, - channelCount, bufferCount, flags, 
buffer, output, &sessionId, &status); + (audio_stream_type_t) streamType, sampleRate, format, + channelCount, bufferCount, flags, buffer, output, isTimed, &sessionId, &status); reply->writeInt32(sessionId); reply->writeInt32(status); reply->writeStrongBinder(track->asBinder()); @@ -697,9 +705,9 @@ status_t BnAudioFlinger::onTransact( case OPEN_RECORD: { CHECK_INTERFACE(IAudioFlinger, data, reply); pid_t pid = data.readInt32(); - int input = data.readInt32(); + audio_io_handle_t input = (audio_io_handle_t) data.readInt32(); uint32_t sampleRate = data.readInt32(); - int format = data.readInt32(); + audio_format_t format = (audio_format_t) data.readInt32(); int channelCount = data.readInt32(); size_t bufferCount = data.readInt32(); uint32_t flags = data.readInt32(); @@ -714,27 +722,27 @@ status_t BnAudioFlinger::onTransact( } break; case SAMPLE_RATE: { CHECK_INTERFACE(IAudioFlinger, data, reply); - reply->writeInt32( sampleRate(data.readInt32()) ); + reply->writeInt32( sampleRate((audio_io_handle_t) data.readInt32()) ); return NO_ERROR; } break; case CHANNEL_COUNT: { CHECK_INTERFACE(IAudioFlinger, data, reply); - reply->writeInt32( channelCount(data.readInt32()) ); + reply->writeInt32( channelCount((audio_io_handle_t) data.readInt32()) ); return NO_ERROR; } break; case FORMAT: { CHECK_INTERFACE(IAudioFlinger, data, reply); - reply->writeInt32( format(data.readInt32()) ); + reply->writeInt32( format((audio_io_handle_t) data.readInt32()) ); return NO_ERROR; } break; case FRAME_COUNT: { CHECK_INTERFACE(IAudioFlinger, data, reply); - reply->writeInt32( frameCount(data.readInt32()) ); + reply->writeInt32( frameCount((audio_io_handle_t) data.readInt32()) ); return NO_ERROR; } break; case LATENCY: { CHECK_INTERFACE(IAudioFlinger, data, reply); - reply->writeInt32( latency(data.readInt32()) ); + reply->writeInt32( latency((audio_io_handle_t) data.readInt32()) ); return NO_ERROR; } break; case SET_MASTER_VOLUME: { @@ -761,32 +769,32 @@ status_t BnAudioFlinger::onTransact( 
CHECK_INTERFACE(IAudioFlinger, data, reply); int stream = data.readInt32(); float volume = data.readFloat(); - int output = data.readInt32(); - reply->writeInt32( setStreamVolume(stream, volume, output) ); + audio_io_handle_t output = (audio_io_handle_t) data.readInt32(); + reply->writeInt32( setStreamVolume((audio_stream_type_t) stream, volume, output) ); return NO_ERROR; } break; case SET_STREAM_MUTE: { CHECK_INTERFACE(IAudioFlinger, data, reply); int stream = data.readInt32(); - reply->writeInt32( setStreamMute(stream, data.readInt32()) ); + reply->writeInt32( setStreamMute((audio_stream_type_t) stream, data.readInt32()) ); return NO_ERROR; } break; case STREAM_VOLUME: { CHECK_INTERFACE(IAudioFlinger, data, reply); int stream = data.readInt32(); int output = data.readInt32(); - reply->writeFloat( streamVolume(stream, output) ); + reply->writeFloat( streamVolume((audio_stream_type_t) stream, output) ); return NO_ERROR; } break; case STREAM_MUTE: { CHECK_INTERFACE(IAudioFlinger, data, reply); int stream = data.readInt32(); - reply->writeInt32( streamMute(stream) ); + reply->writeInt32( streamMute((audio_stream_type_t) stream) ); return NO_ERROR; } break; case SET_MODE: { CHECK_INTERFACE(IAudioFlinger, data, reply); - int mode = data.readInt32(); + audio_mode_t mode = (audio_mode_t) data.readInt32(); reply->writeInt32( setMode(mode) ); return NO_ERROR; } break; @@ -803,14 +811,14 @@ status_t BnAudioFlinger::onTransact( } break; case SET_PARAMETERS: { CHECK_INTERFACE(IAudioFlinger, data, reply); - int ioHandle = data.readInt32(); + audio_io_handle_t ioHandle = (audio_io_handle_t) data.readInt32(); String8 keyValuePairs(data.readString8()); reply->writeInt32(setParameters(ioHandle, keyValuePairs)); return NO_ERROR; } break; case GET_PARAMETERS: { CHECK_INTERFACE(IAudioFlinger, data, reply); - int ioHandle = data.readInt32(); + audio_io_handle_t ioHandle = (audio_io_handle_t) data.readInt32(); String8 keys(data.readString8()); 
reply->writeString8(getParameters(ioHandle, keys)); return NO_ERROR; @@ -825,7 +833,7 @@ status_t BnAudioFlinger::onTransact( case GET_INPUTBUFFERSIZE: { CHECK_INTERFACE(IAudioFlinger, data, reply); uint32_t sampleRate = data.readInt32(); - int format = data.readInt32(); + audio_format_t format = (audio_format_t) data.readInt32(); int channelCount = data.readInt32(); reply->writeInt32( getInputBufferSize(sampleRate, format, channelCount) ); return NO_ERROR; @@ -834,18 +842,18 @@ status_t BnAudioFlinger::onTransact( CHECK_INTERFACE(IAudioFlinger, data, reply); uint32_t devices = data.readInt32(); uint32_t samplingRate = data.readInt32(); - uint32_t format = data.readInt32(); + audio_format_t format = (audio_format_t) data.readInt32(); uint32_t channels = data.readInt32(); uint32_t latency = data.readInt32(); uint32_t flags = data.readInt32(); - int output = openOutput(&devices, + audio_io_handle_t output = openOutput(&devices, &samplingRate, &format, &channels, &latency, flags); ALOGV("OPEN_OUTPUT output, %p", output); - reply->writeInt32(output); + reply->writeInt32((int32_t) output); reply->writeInt32(devices); reply->writeInt32(samplingRate); reply->writeInt32(format); @@ -855,40 +863,40 @@ status_t BnAudioFlinger::onTransact( } break; case OPEN_DUPLICATE_OUTPUT: { CHECK_INTERFACE(IAudioFlinger, data, reply); - int output1 = data.readInt32(); - int output2 = data.readInt32(); - reply->writeInt32(openDuplicateOutput(output1, output2)); + audio_io_handle_t output1 = (audio_io_handle_t) data.readInt32(); + audio_io_handle_t output2 = (audio_io_handle_t) data.readInt32(); + reply->writeInt32((int32_t) openDuplicateOutput(output1, output2)); return NO_ERROR; } break; case CLOSE_OUTPUT: { CHECK_INTERFACE(IAudioFlinger, data, reply); - reply->writeInt32(closeOutput(data.readInt32())); + reply->writeInt32(closeOutput((audio_io_handle_t) data.readInt32())); return NO_ERROR; } break; case SUSPEND_OUTPUT: { CHECK_INTERFACE(IAudioFlinger, data, reply); - 
reply->writeInt32(suspendOutput(data.readInt32())); + reply->writeInt32(suspendOutput((audio_io_handle_t) data.readInt32())); return NO_ERROR; } break; case RESTORE_OUTPUT: { CHECK_INTERFACE(IAudioFlinger, data, reply); - reply->writeInt32(restoreOutput(data.readInt32())); + reply->writeInt32(restoreOutput((audio_io_handle_t) data.readInt32())); return NO_ERROR; } break; case OPEN_INPUT: { CHECK_INTERFACE(IAudioFlinger, data, reply); uint32_t devices = data.readInt32(); uint32_t samplingRate = data.readInt32(); - uint32_t format = data.readInt32(); + audio_format_t format = (audio_format_t) data.readInt32(); uint32_t channels = data.readInt32(); - uint32_t acoutics = data.readInt32(); + audio_in_acoustics_t acoustics = (audio_in_acoustics_t) data.readInt32(); - int input = openInput(&devices, + audio_io_handle_t input = openInput(&devices, &samplingRate, &format, &channels, - acoutics); - reply->writeInt32(input); + acoustics); + reply->writeInt32((int32_t) input); reply->writeInt32(devices); reply->writeInt32(samplingRate); reply->writeInt32(format); @@ -897,14 +905,14 @@ status_t BnAudioFlinger::onTransact( } break; case CLOSE_INPUT: { CHECK_INTERFACE(IAudioFlinger, data, reply); - reply->writeInt32(closeInput(data.readInt32())); + reply->writeInt32(closeInput((audio_io_handle_t) data.readInt32())); return NO_ERROR; } break; case SET_STREAM_OUTPUT: { CHECK_INTERFACE(IAudioFlinger, data, reply); uint32_t stream = data.readInt32(); - int output = data.readInt32(); - reply->writeInt32(setStreamOutput(stream, output)); + audio_io_handle_t output = (audio_io_handle_t) data.readInt32(); + reply->writeInt32(setStreamOutput((audio_stream_type_t) stream, output)); return NO_ERROR; } break; case SET_VOICE_VOLUME: { @@ -915,7 +923,7 @@ status_t BnAudioFlinger::onTransact( } break; case GET_RENDER_POSITION: { CHECK_INTERFACE(IAudioFlinger, data, reply); - int output = data.readInt32(); + audio_io_handle_t output = (audio_io_handle_t) data.readInt32(); uint32_t halFrames; 
uint32_t dspFrames; status_t status = getRenderPosition(&halFrames, &dspFrames, output); @@ -928,7 +936,7 @@ status_t BnAudioFlinger::onTransact( } case GET_INPUT_FRAMES_LOST: { CHECK_INTERFACE(IAudioFlinger, data, reply); - int ioHandle = data.readInt32(); + audio_io_handle_t ioHandle = (audio_io_handle_t) data.readInt32(); reply->writeInt32(getInputFramesLost(ioHandle)); return NO_ERROR; } break; @@ -988,7 +996,7 @@ status_t BnAudioFlinger::onTransact( data.read(&desc, sizeof(effect_descriptor_t)); sp<IEffectClient> client = interface_cast<IEffectClient>(data.readStrongBinder()); int32_t priority = data.readInt32(); - int output = data.readInt32(); + audio_io_handle_t output = (audio_io_handle_t) data.readInt32(); int sessionId = data.readInt32(); status_t status; int id; @@ -1005,8 +1013,8 @@ status_t BnAudioFlinger::onTransact( case MOVE_EFFECTS: { CHECK_INTERFACE(IAudioFlinger, data, reply); int session = data.readInt32(); - int srcOutput = data.readInt32(); - int dstOutput = data.readInt32(); + audio_io_handle_t srcOutput = (audio_io_handle_t) data.readInt32(); + audio_io_handle_t dstOutput = (audio_io_handle_t) data.readInt32(); reply->writeInt32(moveEffects(session, srcOutput, dstOutput)); return NO_ERROR; } break; diff --git a/media/libmedia/IAudioFlingerClient.cpp b/media/libmedia/IAudioFlingerClient.cpp index 5a3f250..1db39a3 100644 --- a/media/libmedia/IAudioFlingerClient.cpp +++ b/media/libmedia/IAudioFlingerClient.cpp @@ -39,18 +39,18 @@ public: { } - void ioConfigChanged(int event, int ioHandle, void *param2) + void ioConfigChanged(int event, audio_io_handle_t ioHandle, const void *param2) { Parcel data, reply; data.writeInterfaceToken(IAudioFlingerClient::getInterfaceDescriptor()); data.writeInt32(event); - data.writeInt32(ioHandle); + data.writeInt32((int32_t) ioHandle); if (event == AudioSystem::STREAM_CONFIG_CHANGED) { - uint32_t stream = *(uint32_t *)param2; + uint32_t stream = *(const uint32_t *)param2; ALOGV("ioConfigChanged stream %d", 
stream); data.writeInt32(stream); } else if (event != AudioSystem::OUTPUT_CLOSED && event != AudioSystem::INPUT_CLOSED) { - AudioSystem::OutputDescriptor *desc = (AudioSystem::OutputDescriptor *)param2; + const AudioSystem::OutputDescriptor *desc = (const AudioSystem::OutputDescriptor *)param2; data.writeInt32(desc->samplingRate); data.writeInt32(desc->format); data.writeInt32(desc->channels); @@ -72,8 +72,8 @@ status_t BnAudioFlingerClient::onTransact( case IO_CONFIG_CHANGED: { CHECK_INTERFACE(IAudioFlingerClient, data, reply); int event = data.readInt32(); - int ioHandle = data.readInt32(); - void *param2 = 0; + audio_io_handle_t ioHandle = (audio_io_handle_t) data.readInt32(); + const void *param2 = NULL; AudioSystem::OutputDescriptor desc; uint32_t stream; if (event == AudioSystem::STREAM_CONFIG_CHANGED) { diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp index 50b4855..da7c124 100644 --- a/media/libmedia/IAudioPolicyService.cpp +++ b/media/libmedia/IAudioPolicyService.cpp @@ -33,7 +33,7 @@ enum { SET_DEVICE_CONNECTION_STATE = IBinder::FIRST_CALL_TRANSACTION, GET_DEVICE_CONNECTION_STATE, SET_PHONE_STATE, - SET_RINGER_MODE, + SET_RINGER_MODE, // reserved, no longer used SET_FORCE_USE, GET_FORCE_USE, GET_OUTPUT, @@ -91,7 +91,7 @@ public: return static_cast <audio_policy_dev_state_t>(reply.readInt32()); } - virtual status_t setPhoneState(int state) + virtual status_t setPhoneState(audio_mode_t state) { Parcel data, reply; data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor()); @@ -100,16 +100,6 @@ public: return static_cast <status_t> (reply.readInt32()); } - virtual status_t setRingerMode(uint32_t mode, uint32_t mask) - { - Parcel data, reply; - data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor()); - data.writeInt32(mode); - data.writeInt32(mask); - remote()->transact(SET_RINGER_MODE, data, &reply); - return static_cast <status_t> (reply.readInt32()); - } - virtual status_t 
setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config) { Parcel data, reply; @@ -132,7 +122,7 @@ public: virtual audio_io_handle_t getOutput( audio_stream_type_t stream, uint32_t samplingRate, - uint32_t format, + audio_format_t format, uint32_t channels, audio_policy_output_flags_t flags) { @@ -154,7 +144,7 @@ public: Parcel data, reply; data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor()); data.writeInt32(output); - data.writeInt32(stream); + data.writeInt32((int32_t) stream); data.writeInt32(session); remote()->transact(START_OUTPUT, data, &reply); return static_cast <status_t> (reply.readInt32()); @@ -167,7 +157,7 @@ public: Parcel data, reply; data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor()); data.writeInt32(output); - data.writeInt32(stream); + data.writeInt32((int32_t) stream); data.writeInt32(session); remote()->transact(STOP_OUTPUT, data, &reply); return static_cast <status_t> (reply.readInt32()); @@ -182,16 +172,16 @@ public: } virtual audio_io_handle_t getInput( - int inputSource, + audio_source_t inputSource, uint32_t samplingRate, - uint32_t format, + audio_format_t format, uint32_t channels, audio_in_acoustics_t acoustics, int audioSession) { Parcel data, reply; data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor()); - data.writeInt32(inputSource); + data.writeInt32((int32_t) inputSource); data.writeInt32(samplingRate); data.writeInt32(static_cast <uint32_t>(format)); data.writeInt32(channels); @@ -240,21 +230,28 @@ public: return static_cast <status_t> (reply.readInt32()); } - virtual status_t setStreamVolumeIndex(audio_stream_type_t stream, int index) + virtual status_t setStreamVolumeIndex(audio_stream_type_t stream, + int index, + audio_devices_t device) { Parcel data, reply; data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor()); data.writeInt32(static_cast <uint32_t>(stream)); data.writeInt32(index); + data.writeInt32(static_cast 
<uint32_t>(device)); remote()->transact(SET_STREAM_VOLUME, data, &reply); return static_cast <status_t> (reply.readInt32()); } - virtual status_t getStreamVolumeIndex(audio_stream_type_t stream, int *index) + virtual status_t getStreamVolumeIndex(audio_stream_type_t stream, + int *index, + audio_devices_t device) { Parcel data, reply; data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor()); data.writeInt32(static_cast <uint32_t>(stream)); + data.writeInt32(static_cast <uint32_t>(device)); + remote()->transact(GET_STREAM_VOLUME, data, &reply); int lIndex = reply.readInt32(); if (index) *index = lIndex; @@ -270,13 +267,13 @@ public: return reply.readInt32(); } - virtual uint32_t getDevicesForStream(audio_stream_type_t stream) + virtual audio_devices_t getDevicesForStream(audio_stream_type_t stream) { Parcel data, reply; data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor()); data.writeInt32(static_cast <uint32_t>(stream)); remote()->transact(GET_DEVICES_FOR_STREAM, data, &reply); - return (uint32_t) reply.readInt32(); + return (audio_devices_t) reply.readInt32(); } virtual audio_io_handle_t getOutputForEffect(effect_descriptor_t *desc) @@ -324,11 +321,11 @@ public: return static_cast <status_t> (reply.readInt32()); } - virtual bool isStreamActive(int stream, uint32_t inPastMs) const + virtual bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const { Parcel data, reply; data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor()); - data.writeInt32(stream); + data.writeInt32((int32_t) stream); data.writeInt32(inPastMs); remote()->transact(IS_STREAM_ACTIVE, data, &reply); return reply.readInt32(); @@ -394,15 +391,7 @@ status_t BnAudioPolicyService::onTransact( case SET_PHONE_STATE: { CHECK_INTERFACE(IAudioPolicyService, data, reply); - reply->writeInt32(static_cast <uint32_t>(setPhoneState(data.readInt32()))); - return NO_ERROR; - } break; - - case SET_RINGER_MODE: { - CHECK_INTERFACE(IAudioPolicyService, 
data, reply); - uint32_t mode = data.readInt32(); - uint32_t mask = data.readInt32(); - reply->writeInt32(static_cast <uint32_t>(setRingerMode(mode, mask))); + reply->writeInt32(static_cast <uint32_t>(setPhoneState((audio_mode_t) data.readInt32()))); return NO_ERROR; } break; @@ -427,7 +416,7 @@ status_t BnAudioPolicyService::onTransact( audio_stream_type_t stream = static_cast <audio_stream_type_t>(data.readInt32()); uint32_t samplingRate = data.readInt32(); - uint32_t format = data.readInt32(); + audio_format_t format = (audio_format_t) data.readInt32(); uint32_t channels = data.readInt32(); audio_policy_output_flags_t flags = static_cast <audio_policy_output_flags_t>(data.readInt32()); @@ -472,9 +461,9 @@ status_t BnAudioPolicyService::onTransact( case GET_INPUT: { CHECK_INTERFACE(IAudioPolicyService, data, reply); - int inputSource = data.readInt32(); + audio_source_t inputSource = (audio_source_t) data.readInt32(); uint32_t samplingRate = data.readInt32(); - uint32_t format = data.readInt32(); + audio_format_t format = (audio_format_t) data.readInt32(); uint32_t channels = data.readInt32(); audio_in_acoustics_t acoustics = static_cast <audio_in_acoustics_t>(data.readInt32()); @@ -525,7 +514,10 @@ status_t BnAudioPolicyService::onTransact( audio_stream_type_t stream = static_cast <audio_stream_type_t>(data.readInt32()); int index = data.readInt32(); - reply->writeInt32(static_cast <uint32_t>(setStreamVolumeIndex(stream, index))); + audio_devices_t device = static_cast <audio_devices_t>(data.readInt32()); + reply->writeInt32(static_cast <uint32_t>(setStreamVolumeIndex(stream, + index, + device))); return NO_ERROR; } break; @@ -533,8 +525,9 @@ status_t BnAudioPolicyService::onTransact( CHECK_INTERFACE(IAudioPolicyService, data, reply); audio_stream_type_t stream = static_cast <audio_stream_type_t>(data.readInt32()); + audio_devices_t device = static_cast <audio_devices_t>(data.readInt32()); int index; - status_t status = getStreamVolumeIndex(stream, &index); + 
status_t status = getStreamVolumeIndex(stream, &index, device); reply->writeInt32(index); reply->writeInt32(static_cast <uint32_t>(status)); return NO_ERROR; @@ -598,9 +591,9 @@ status_t BnAudioPolicyService::onTransact( case IS_STREAM_ACTIVE: { CHECK_INTERFACE(IAudioPolicyService, data, reply); - int stream = data.readInt32(); + audio_stream_type_t stream = (audio_stream_type_t) data.readInt32(); uint32_t inPastMs = (uint32_t)data.readInt32(); - reply->writeInt32( isStreamActive(stream, inPastMs) ); + reply->writeInt32( isStreamActive((audio_stream_type_t) stream, inPastMs) ); return NO_ERROR; } break; diff --git a/media/libmedia/IAudioRecord.cpp b/media/libmedia/IAudioRecord.cpp index 8c7a960..6b473c9 100644 --- a/media/libmedia/IAudioRecord.cpp +++ b/media/libmedia/IAudioRecord.cpp @@ -42,10 +42,11 @@ public: { } - virtual status_t start() + virtual status_t start(pid_t tid) { Parcel data, reply; data.writeInterfaceToken(IAudioRecord::getInterfaceDescriptor()); + data.writeInt32(tid); status_t status = remote()->transact(START, data, &reply); if (status == NO_ERROR) { status = reply.readInt32(); @@ -90,7 +91,7 @@ status_t BnAudioRecord::onTransact( } break; case START: { CHECK_INTERFACE(IAudioRecord, data, reply); - reply->writeInt32(start()); + reply->writeInt32(start(data.readInt32())); return NO_ERROR; } break; case STOP: { diff --git a/media/libmedia/IAudioTrack.cpp b/media/libmedia/IAudioTrack.cpp index 0b372f3..28ebbbf 100644 --- a/media/libmedia/IAudioTrack.cpp +++ b/media/libmedia/IAudioTrack.cpp @@ -1,4 +1,4 @@ -/* //device/extlibs/pv/android/IAudioTrack.cpp +/* ** ** Copyright 2007, The Android Open Source Project ** @@ -35,7 +35,10 @@ enum { FLUSH, MUTE, PAUSE, - ATTACH_AUX_EFFECT + ATTACH_AUX_EFFECT, + ALLOCATE_TIMED_BUFFER, + QUEUE_TIMED_BUFFER, + SET_MEDIA_TIME_TRANSFORM, }; class BpAudioTrack : public BpInterface<IAudioTrack> @@ -46,10 +49,23 @@ public: { } - virtual status_t start() + virtual sp<IMemory> getCblk() const + { + Parcel data, reply; 
+ sp<IMemory> cblk; + data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor()); + status_t status = remote()->transact(GET_CBLK, data, &reply); + if (status == NO_ERROR) { + cblk = interface_cast<IMemory>(reply.readStrongBinder()); + } + return cblk; + } + + virtual status_t start(pid_t tid) { Parcel data, reply; data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor()); + data.writeInt32(tid); status_t status = remote()->transact(START, data, &reply); if (status == NO_ERROR) { status = reply.readInt32(); @@ -88,28 +104,62 @@ public: remote()->transact(PAUSE, data, &reply); } - virtual sp<IMemory> getCblk() const + virtual status_t attachAuxEffect(int effectId) { Parcel data, reply; - sp<IMemory> cblk; data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor()); - status_t status = remote()->transact(GET_CBLK, data, &reply); + data.writeInt32(effectId); + status_t status = remote()->transact(ATTACH_AUX_EFFECT, data, &reply); if (status == NO_ERROR) { - cblk = interface_cast<IMemory>(reply.readStrongBinder()); + status = reply.readInt32(); + } else { + ALOGW("attachAuxEffect() error: %s", strerror(-status)); } - return cblk; + return status; } - virtual status_t attachAuxEffect(int effectId) - { + virtual status_t allocateTimedBuffer(size_t size, sp<IMemory>* buffer) { Parcel data, reply; data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor()); - data.writeInt32(effectId); - status_t status = remote()->transact(ATTACH_AUX_EFFECT, data, &reply); + data.writeInt32(size); + status_t status = remote()->transact(ALLOCATE_TIMED_BUFFER, + data, &reply); + if (status == NO_ERROR) { + status = reply.readInt32(); + if (status == NO_ERROR) { + *buffer = interface_cast<IMemory>(reply.readStrongBinder()); + } + } + return status; + } + + virtual status_t queueTimedBuffer(const sp<IMemory>& buffer, + int64_t pts) { + Parcel data, reply; + data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor()); + data.writeStrongBinder(buffer->asBinder()); + 
data.writeInt64(pts); + status_t status = remote()->transact(QUEUE_TIMED_BUFFER, + data, &reply); + if (status == NO_ERROR) { + status = reply.readInt32(); + } + return status; + } + + virtual status_t setMediaTimeTransform(const LinearTransform& xform, + int target) { + Parcel data, reply; + data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor()); + data.writeInt64(xform.a_zero); + data.writeInt64(xform.b_zero); + data.writeInt32(xform.a_to_b_numer); + data.writeInt32(xform.a_to_b_denom); + data.writeInt32(target); + status_t status = remote()->transact(SET_MEDIA_TIME_TRANSFORM, + data, &reply); if (status == NO_ERROR) { status = reply.readInt32(); - } else { - ALOGW("attachAuxEffect() error: %s", strerror(-status)); } return status; } @@ -130,7 +180,7 @@ status_t BnAudioTrack::onTransact( } break; case START: { CHECK_INTERFACE(IAudioTrack, data, reply); - reply->writeInt32(start()); + reply->writeInt32(start(data.readInt32())); return NO_ERROR; } break; case STOP: { @@ -158,10 +208,38 @@ status_t BnAudioTrack::onTransact( reply->writeInt32(attachAuxEffect(data.readInt32())); return NO_ERROR; } break; + case ALLOCATE_TIMED_BUFFER: { + CHECK_INTERFACE(IAudioTrack, data, reply); + sp<IMemory> buffer; + status_t status = allocateTimedBuffer(data.readInt32(), &buffer); + reply->writeInt32(status); + if (status == NO_ERROR) { + reply->writeStrongBinder(buffer->asBinder()); + } + return NO_ERROR; + } break; + case QUEUE_TIMED_BUFFER: { + CHECK_INTERFACE(IAudioTrack, data, reply); + sp<IMemory> buffer = interface_cast<IMemory>( + data.readStrongBinder()); + uint64_t pts = data.readInt64(); + reply->writeInt32(queueTimedBuffer(buffer, pts)); + return NO_ERROR; + } break; + case SET_MEDIA_TIME_TRANSFORM: { + CHECK_INTERFACE(IAudioTrack, data, reply); + LinearTransform xform; + xform.a_zero = data.readInt64(); + xform.b_zero = data.readInt64(); + xform.a_to_b_numer = data.readInt32(); + xform.a_to_b_denom = data.readInt32(); + int target = data.readInt32(); + 
reply->writeInt32(setMediaTimeTransform(xform, target)); + return NO_ERROR; + } break; default: return BBinder::onTransact(code, data, reply, flags); } } }; // namespace android - diff --git a/media/libmedia/IEffect.cpp b/media/libmedia/IEffect.cpp index d469e28..5d40cc8 100644 --- a/media/libmedia/IEffect.cpp +++ b/media/libmedia/IEffect.cpp @@ -83,8 +83,15 @@ public: size = *pReplySize; } data.writeInt32(size); - remote()->transact(COMMAND, data, &reply); - status_t status = reply.readInt32(); + + status_t status = remote()->transact(COMMAND, data, &reply); + if (status != NO_ERROR) { + if (pReplySize != NULL) + *pReplySize = 0; + return status; + } + + status = reply.readInt32(); size = reply.readInt32(); if (size != 0 && pReplyData != NULL && pReplySize != NULL) { reply.read(pReplyData, size); diff --git a/media/libmedia/IMediaDeathNotifier.cpp b/media/libmedia/IMediaDeathNotifier.cpp index 8525482..aeb35a5 100644 --- a/media/libmedia/IMediaDeathNotifier.cpp +++ b/media/libmedia/IMediaDeathNotifier.cpp @@ -36,7 +36,7 @@ IMediaDeathNotifier::getMediaPlayerService() { ALOGV("getMediaPlayerService"); Mutex::Autolock _l(sServiceLock); - if (sMediaPlayerService.get() == 0) { + if (sMediaPlayerService == 0) { sp<IServiceManager> sm = defaultServiceManager(); sp<IBinder> binder; do { diff --git a/media/libmedia/IMediaPlayer.cpp b/media/libmedia/IMediaPlayer.cpp index 9c1e6b7..c47fa41 100644 --- a/media/libmedia/IMediaPlayer.cpp +++ b/media/libmedia/IMediaPlayer.cpp @@ -15,6 +15,7 @@ ** limitations under the License. 
*/ +#include <arpa/inet.h> #include <stdint.h> #include <sys/types.h> @@ -23,8 +24,6 @@ #include <media/IMediaPlayer.h> #include <media/IStreamSource.h> -#include <surfaceflinger/ISurface.h> -#include <surfaceflinger/Surface.h> #include <gui/ISurfaceTexture.h> #include <utils/String8.h> @@ -55,6 +54,7 @@ enum { SET_VIDEO_SURFACETEXTURE, SET_PARAMETER, GET_PARAMETER, + SET_RETRANSMIT_ENDPOINT, }; class BpMediaPlayer: public BpInterface<IMediaPlayer> @@ -198,11 +198,11 @@ public: return reply.readInt32(); } - status_t setAudioStreamType(int type) + status_t setAudioStreamType(audio_stream_type_t stream) { Parcel data, reply; data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor()); - data.writeInt32(type); + data.writeInt32((int32_t) stream); remote()->transact(SET_AUDIO_STREAM_TYPE, data, &reply); return reply.readInt32(); } @@ -291,6 +291,25 @@ public: return remote()->transact(GET_PARAMETER, data, reply); } + status_t setRetransmitEndpoint(const struct sockaddr_in* endpoint) { + Parcel data, reply; + status_t err; + + data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor()); + if (NULL != endpoint) { + data.writeInt32(sizeof(*endpoint)); + data.write(endpoint, sizeof(*endpoint)); + } else { + data.writeInt32(0); + } + + err = remote()->transact(SET_RETRANSMIT_ENDPOINT, data, &reply); + if (OK != err) { + return err; + } + + return reply.readInt32(); + } }; IMPLEMENT_META_INTERFACE(MediaPlayer, "android.media.IMediaPlayer"); @@ -397,7 +416,7 @@ status_t BnMediaPlayer::onTransact( } break; case SET_AUDIO_STREAM_TYPE: { CHECK_INTERFACE(IMediaPlayer, data, reply); - reply->writeInt32(setAudioStreamType(data.readInt32())); + reply->writeInt32(setAudioStreamType((audio_stream_type_t) data.readInt32())); return NO_ERROR; } break; case SET_LOOPING: { @@ -459,6 +478,20 @@ status_t BnMediaPlayer::onTransact( CHECK_INTERFACE(IMediaPlayer, data, reply); return getParameter(data.readInt32(), reply); } break; + case SET_RETRANSMIT_ENDPOINT: { + 
CHECK_INTERFACE(IMediaPlayer, data, reply); + + struct sockaddr_in endpoint; + int amt = data.readInt32(); + if (amt == sizeof(endpoint)) { + data.read(&endpoint, sizeof(struct sockaddr_in)); + reply->writeInt32(setRetransmitEndpoint(&endpoint)); + } else { + reply->writeInt32(setRetransmitEndpoint(NULL)); + } + + return NO_ERROR; + } break; default: return BBinder::onTransact(code, data, reply, flags); } diff --git a/media/libmedia/IMediaPlayerService.cpp b/media/libmedia/IMediaPlayerService.cpp index 8e4dd04..f5b5cbd 100644 --- a/media/libmedia/IMediaPlayerService.cpp +++ b/media/libmedia/IMediaPlayerService.cpp @@ -78,7 +78,7 @@ public: return interface_cast<IMediaRecorder>(reply.readStrongBinder()); } - virtual sp<IMemory> decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, int* pFormat) + virtual sp<IMemory> decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat) { Parcel data, reply; data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor()); @@ -86,11 +86,11 @@ public: remote()->transact(DECODE_URL, data, &reply); *pSampleRate = uint32_t(reply.readInt32()); *pNumChannels = reply.readInt32(); - *pFormat = reply.readInt32(); + *pFormat = (audio_format_t) reply.readInt32(); return interface_cast<IMemory>(reply.readStrongBinder()); } - virtual sp<IMemory> decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, int* pFormat) + virtual sp<IMemory> decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat) { Parcel data, reply; data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor()); @@ -100,7 +100,7 @@ public: remote()->transact(DECODE_FD, data, &reply); *pSampleRate = uint32_t(reply.readInt32()); *pNumChannels = reply.readInt32(); - *pFormat = reply.readInt32(); + *pFormat = (audio_format_t) reply.readInt32(); return interface_cast<IMemory>(reply.readStrongBinder()); } @@ -148,11 +148,11 @@ status_t 
BnMediaPlayerService::onTransact( const char* url = data.readCString(); uint32_t sampleRate; int numChannels; - int format; + audio_format_t format; sp<IMemory> player = decode(url, &sampleRate, &numChannels, &format); reply->writeInt32(sampleRate); reply->writeInt32(numChannels); - reply->writeInt32(format); + reply->writeInt32((int32_t) format); reply->writeStrongBinder(player->asBinder()); return NO_ERROR; } break; @@ -163,11 +163,11 @@ status_t BnMediaPlayerService::onTransact( int64_t length = data.readInt64(); uint32_t sampleRate; int numChannels; - int format; + audio_format_t format; sp<IMemory> player = decode(fd, offset, length, &sampleRate, &numChannels, &format); reply->writeInt32(sampleRate); reply->writeInt32(numChannels); - reply->writeInt32(format); + reply->writeInt32((int32_t) format); reply->writeStrongBinder(player->asBinder()); return NO_ERROR; } break; diff --git a/media/libmedia/IMediaRecorder.cpp b/media/libmedia/IMediaRecorder.cpp index 42f55c2..2f4e31a 100644 --- a/media/libmedia/IMediaRecorder.cpp +++ b/media/libmedia/IMediaRecorder.cpp @@ -19,10 +19,10 @@ #define LOG_TAG "IMediaRecorder" #include <utils/Log.h> #include <binder/Parcel.h> -#include <surfaceflinger/Surface.h> #include <camera/ICamera.h> #include <media/IMediaRecorderClient.h> #include <media/IMediaRecorder.h> +#include <gui/Surface.h> #include <gui/ISurfaceTexture.h> #include <unistd.h> diff --git a/media/libmedia/IOMX.cpp b/media/libmedia/IOMX.cpp index d2f5f71..48e427a 100644 --- a/media/libmedia/IOMX.cpp +++ b/media/libmedia/IOMX.cpp @@ -22,8 +22,6 @@ #include <binder/Parcel.h> #include <media/IOMX.h> #include <media/stagefright/foundation/ADebug.h> -#include <surfaceflinger/ISurface.h> -#include <surfaceflinger/Surface.h> namespace android { @@ -59,9 +57,10 @@ public: : BpInterface<IOMX>(impl) { } - virtual bool livesLocally(pid_t pid) { + virtual bool livesLocally(node_id node, pid_t pid) { Parcel data, reply; data.writeInterfaceToken(IOMX::getInterfaceDescriptor()); + 
data.writeIntPtr((intptr_t)node); data.writeInt32(pid); remote()->transact(LIVES_LOCALLY, data, &reply); @@ -417,7 +416,9 @@ status_t BnOMX::onTransact( case LIVES_LOCALLY: { CHECK_INTERFACE(IOMX, data, reply); - reply->writeInt32(livesLocally((pid_t)data.readInt32())); + node_id node = (void *)data.readIntPtr(); + pid_t pid = (pid_t)data.readInt32(); + reply->writeInt32(livesLocally(node, pid)); return OK; } diff --git a/media/libmedia/JetPlayer.cpp b/media/libmedia/JetPlayer.cpp index fa5b67a..6cb5b82 100644 --- a/media/libmedia/JetPlayer.cpp +++ b/media/libmedia/JetPlayer.cpp @@ -91,7 +91,7 @@ int JetPlayer::init() mAudioTrack = new AudioTrack(); mAudioTrack->set(AUDIO_STREAM_MUSIC, //TODO parametrize this pLibConfig->sampleRate, - 1, // format = PCM 16bits per sample, + AUDIO_FORMAT_PCM_16_BIT, (pLibConfig->numChannels == 2) ? AUDIO_CHANNEL_OUT_STEREO : AUDIO_CHANNEL_OUT_MONO, mTrackBufferSize, 0); @@ -100,7 +100,8 @@ int JetPlayer::init() { Mutex::Autolock l(mMutex); ALOGV("JetPlayer::init(): trying to start render thread"); - createThreadEtc(renderThread, this, "jetRenderThread", ANDROID_PRIORITY_AUDIO); + mThread = new JetPlayerThread(this); + mThread->run("jetRenderThread", ANDROID_PRIORITY_AUDIO); mCondition.wait(mMutex); } if (mTid > 0) { @@ -156,12 +157,6 @@ int JetPlayer::release() //------------------------------------------------------------------------------------------------- -int JetPlayer::renderThread(void* p) { - - return ((JetPlayer*)p)->render(); -} - -//------------------------------------------------------------------------------------------------- int JetPlayer::render() { EAS_RESULT result = EAS_FAILURE; EAS_I32 count; @@ -173,10 +168,6 @@ int JetPlayer::render() { // allocate render buffer mAudioBuffer = new EAS_PCM[pLibConfig->mixBufferSize * pLibConfig->numChannels * MIX_NUM_BUFFERS]; - if (!mAudioBuffer) { - ALOGE("JetPlayer::render(): mAudioBuffer allocate failed"); - goto threadExit; - } // signal main thread that we started { @@ 
-255,14 +246,12 @@ int JetPlayer::render() { }//while (1) threadExit: - if (mAudioTrack) { + if (mAudioTrack != NULL) { mAudioTrack->stop(); mAudioTrack->flush(); } - if (mAudioBuffer) { - delete [] mAudioBuffer; - mAudioBuffer = NULL; - } + delete [] mAudioBuffer; + mAudioBuffer = NULL; mMutex.lock(); mTid = -1; mCondition.signal(); @@ -343,8 +332,8 @@ int JetPlayer::loadFromFile(const char* path) Mutex::Autolock lock(mMutex); mEasJetFileLoc = (EAS_FILE_LOCATOR) malloc(sizeof(EAS_FILE)); - memset(mJetFilePath, 0, 256); - strncpy(mJetFilePath, path, strlen(path)); + strncpy(mJetFilePath, path, sizeof(mJetFilePath)); + mJetFilePath[sizeof(mJetFilePath) - 1] = '\0'; mEasJetFileLoc->path = mJetFilePath; mEasJetFileLoc->fd = 0; diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp index c905762..93ddca8 100644 --- a/media/libmedia/MediaProfiles.cpp +++ b/media/libmedia/MediaProfiles.cpp @@ -25,7 +25,7 @@ #include <cutils/properties.h> #include <expat.h> #include <media/MediaProfiles.h> -#include <media/stagefright/MediaDebug.h> +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/openmax/OMX_Video.h> namespace android { @@ -349,7 +349,7 @@ void MediaProfiles::addImageEncodingQualityLevel(int cameraId, const char** atts { CHECK(!strcmp("quality", atts[0])); int quality = atoi(atts[1]); - ALOGV("%s: cameraId=%d, quality=%d\n", __func__, cameraId, quality); + ALOGV("%s: cameraId=%d, quality=%d", __func__, cameraId, quality); ImageEncodingQualityLevels *levels = findImageEncodingQualityLevels(cameraId); if (levels == NULL) { diff --git a/media/libmedia/MediaScanner.cpp b/media/libmedia/MediaScanner.cpp index 79cab74..73d4519 100644 --- a/media/libmedia/MediaScanner.cpp +++ b/media/libmedia/MediaScanner.cpp @@ -143,7 +143,7 @@ MediaScanResult MediaScanner::doProcessDirectory( if (pathRemaining >= 8 /* strlen(".nomedia") */ ) { strcpy(fileSpot, ".nomedia"); if (access(path, F_OK) == 0) { - ALOGV("found .nomedia, setting 
noMedia flag\n"); + ALOGV("found .nomedia, setting noMedia flag"); noMedia = true; } diff --git a/media/libmedia/MediaScannerClient.cpp b/media/libmedia/MediaScannerClient.cpp index 40b8188..cdfd477 100644 --- a/media/libmedia/MediaScannerClient.cpp +++ b/media/libmedia/MediaScannerClient.cpp @@ -142,12 +142,12 @@ void MediaScannerClient::convertValues(uint32_t encoding) UConverter *conv = ucnv_open(enc, &status); if (U_FAILURE(status)) { - ALOGE("could not create UConverter for %s\n", enc); + ALOGE("could not create UConverter for %s", enc); return; } UConverter *utf8Conv = ucnv_open("UTF-8", &status); if (U_FAILURE(status)) { - ALOGE("could not create UConverter for UTF-8\n"); + ALOGE("could not create UConverter for UTF-8"); ucnv_close(conv); return; } @@ -173,6 +173,7 @@ void MediaScannerClient::convertValues(uint32_t encoding) const char* source = mValues->getEntry(i); int targetLength = len * 3 + 1; char* buffer = new char[targetLength]; + // don't normally check for NULL, but in this case targetLength may be large if (!buffer) break; char* target = buffer; @@ -180,7 +181,7 @@ void MediaScannerClient::convertValues(uint32_t encoding) ucnv_convertEx(utf8Conv, conv, &target, target + targetLength, &source, (const char *)dest, NULL, NULL, NULL, NULL, TRUE, TRUE, &status); if (U_FAILURE(status)) { - ALOGE("ucnv_convertEx failed: %d\n", status); + ALOGE("ucnv_convertEx failed: %d", status); mValues->setEntry(i, "???"); } else { // zero terminate diff --git a/media/libmedia/ToneGenerator.cpp b/media/libmedia/ToneGenerator.cpp index 35dfbb8..54eb98a 100644 --- a/media/libmedia/ToneGenerator.cpp +++ b/media/libmedia/ToneGenerator.cpp @@ -751,7 +751,7 @@ const ToneGenerator::ToneDescriptor ToneGenerator::sToneDescriptors[] = { // Used by ToneGenerator::getToneForRegion() to convert user specified supervisory tone type // to actual tone for current region. 
-const unsigned char ToneGenerator::sToneMappingTable[NUM_REGIONS-1][NUM_SUP_TONES] = { +const unsigned char /*tone_type*/ ToneGenerator::sToneMappingTable[NUM_REGIONS-1][NUM_SUP_TONES] = { { // ANSI TONE_ANSI_DIAL, // TONE_SUP_DIAL TONE_ANSI_BUSY, // TONE_SUP_BUSY @@ -791,16 +791,16 @@ const unsigned char ToneGenerator::sToneMappingTable[NUM_REGIONS-1][NUM_SUP_TONE // generators, instantiates output audio track. // // Input: -// streamType: Type of stream used for tone playback (enum AudioTrack::stream_type) +// streamType: Type of stream used for tone playback // volume: volume applied to tone (0.0 to 1.0) // // Output: // none // //////////////////////////////////////////////////////////////////////////////// -ToneGenerator::ToneGenerator(int streamType, float volume, bool threadCanCallJava) { +ToneGenerator::ToneGenerator(audio_stream_type_t streamType, float volume, bool threadCanCallJava) { - ALOGV("ToneGenerator constructor: streamType=%d, volume=%f\n", streamType, volume); + ALOGV("ToneGenerator constructor: streamType=%d, volume=%f", streamType, volume); mState = TONE_IDLE; @@ -811,9 +811,9 @@ ToneGenerator::ToneGenerator(int streamType, float volume, bool threadCanCallJav mThreadCanCallJava = threadCanCallJava; mStreamType = streamType; mVolume = volume; - mpAudioTrack = 0; - mpToneDesc = 0; - mpNewToneDesc = 0; + mpAudioTrack = NULL; + mpToneDesc = NULL; + mpNewToneDesc = NULL; // Generate tone by chunks of 20 ms to keep cadencing precision mProcessSize = (mSamplingRate * 20) / 1000; @@ -829,9 +829,9 @@ ToneGenerator::ToneGenerator(int streamType, float volume, bool threadCanCallJav } if (initAudioTrack()) { - ALOGV("ToneGenerator INIT OK, time: %d\n", (unsigned int)(systemTime()/1000000)); + ALOGV("ToneGenerator INIT OK, time: %d", (unsigned int)(systemTime()/1000000)); } else { - ALOGV("!!!ToneGenerator INIT FAILED!!!\n"); + ALOGV("!!!ToneGenerator INIT FAILED!!!"); } } @@ -853,11 +853,11 @@ ToneGenerator::ToneGenerator(int streamType, float volume, 
bool threadCanCallJav // //////////////////////////////////////////////////////////////////////////////// ToneGenerator::~ToneGenerator() { - ALOGV("ToneGenerator destructor\n"); + ALOGV("ToneGenerator destructor"); - if (mpAudioTrack) { + if (mpAudioTrack != NULL) { stopTone(); - ALOGV("Delete Track: %p\n", mpAudioTrack); + ALOGV("Delete Track: %p", mpAudioTrack); delete mpAudioTrack; } } @@ -878,7 +878,7 @@ ToneGenerator::~ToneGenerator() { // none // //////////////////////////////////////////////////////////////////////////////// -bool ToneGenerator::startTone(int toneType, int durationMs) { +bool ToneGenerator::startTone(tone_type toneType, int durationMs) { bool lResult = false; status_t lStatus; @@ -892,7 +892,7 @@ bool ToneGenerator::startTone(int toneType, int durationMs) { } } - ALOGV("startTone\n"); + ALOGV("startTone"); mLock.lock(); @@ -915,7 +915,7 @@ bool ToneGenerator::startTone(int toneType, int durationMs) { if (mState == TONE_INIT) { if (prepareWave()) { - ALOGV("Immediate start, time %d\n", (unsigned int)(systemTime()/1000000)); + ALOGV("Immediate start, time %d", (unsigned int)(systemTime()/1000000)); lResult = true; mState = TONE_STARTING; mLock.unlock(); @@ -934,7 +934,7 @@ bool ToneGenerator::startTone(int toneType, int durationMs) { mState = TONE_IDLE; } } else { - ALOGV("Delayed start\n"); + ALOGV("Delayed start"); mState = TONE_RESTARTING; lStatus = mWaitCbkCond.waitRelative(mLock, seconds(3)); if (lStatus == NO_ERROR) { @@ -949,8 +949,8 @@ bool ToneGenerator::startTone(int toneType, int durationMs) { } mLock.unlock(); - ALOGV_IF(lResult, "Tone started, time %d\n", (unsigned int)(systemTime()/1000000)); - ALOGW_IF(!lResult, "Tone start failed!!!, time %d\n", (unsigned int)(systemTime()/1000000)); + ALOGV_IF(lResult, "Tone started, time %d", (unsigned int)(systemTime()/1000000)); + ALOGW_IF(!lResult, "Tone start failed!!!, time %d", (unsigned int)(systemTime()/1000000)); return lResult; } @@ -1012,16 +1012,12 @@ bool 
ToneGenerator::initAudioTrack() { if (mpAudioTrack) { delete mpAudioTrack; - mpAudioTrack = 0; + mpAudioTrack = NULL; } // Open audio track in mono, PCM 16bit, default sampling rate, default buffer size mpAudioTrack = new AudioTrack(); - if (mpAudioTrack == 0) { - ALOGE("AudioTrack allocation failed"); - goto initAudioTrack_exit; - } - ALOGV("Create Track: %p\n", mpAudioTrack); + ALOGV("Create Track: %p", mpAudioTrack); mpAudioTrack->set(mStreamType, 0, @@ -1049,10 +1045,10 @@ bool ToneGenerator::initAudioTrack() { initAudioTrack_exit: // Cleanup - if (mpAudioTrack) { - ALOGV("Delete Track I: %p\n", mpAudioTrack); + if (mpAudioTrack != NULL) { + ALOGV("Delete Track I: %p", mpAudioTrack); delete mpAudioTrack; - mpAudioTrack = 0; + mpAudioTrack = NULL; } return false; @@ -1145,7 +1141,7 @@ void ToneGenerator::audioCallback(int event, void* user, void *info) { if (lpToneGen->mTotalSmp > lpToneGen->mNextSegSmp) { // Time to go to next sequence segment - ALOGV("End Segment, time: %d\n", (unsigned int)(systemTime()/1000000)); + ALOGV("End Segment, time: %d", (unsigned int)(systemTime()/1000000)); lGenSmp = lReqSmp; @@ -1160,13 +1156,13 @@ void ToneGenerator::audioCallback(int event, void* user, void *info) { lpWaveGen->getSamples(lpOut, lGenSmp, lWaveCmd); lFrequency = lpToneDesc->segments[lpToneGen->mCurSegment].waveFreq[++lFreqIdx]; } - ALOGV("ON->OFF, lGenSmp: %d, lReqSmp: %d\n", lGenSmp, lReqSmp); + ALOGV("ON->OFF, lGenSmp: %d, lReqSmp: %d", lGenSmp, lReqSmp); } // check if we need to loop and loop for the reqd times if (lpToneDesc->segments[lpToneGen->mCurSegment].loopCnt) { if (lpToneGen->mLoopCounter < lpToneDesc->segments[lpToneGen->mCurSegment].loopCnt) { - ALOGV ("in if loop loopCnt(%d) loopctr(%d), CurSeg(%d) \n", + ALOGV ("in if loop loopCnt(%d) loopctr(%d), CurSeg(%d)", lpToneDesc->segments[lpToneGen->mCurSegment].loopCnt, lpToneGen->mLoopCounter, lpToneGen->mCurSegment); @@ -1176,14 +1172,14 @@ void ToneGenerator::audioCallback(int event, void* user, void 
*info) { // completed loop. go to next segment lpToneGen->mLoopCounter = 0; lpToneGen->mCurSegment++; - ALOGV ("in else loop loopCnt(%d) loopctr(%d), CurSeg(%d) \n", + ALOGV ("in else loop loopCnt(%d) loopctr(%d), CurSeg(%d)", lpToneDesc->segments[lpToneGen->mCurSegment].loopCnt, lpToneGen->mLoopCounter, lpToneGen->mCurSegment); } } else { lpToneGen->mCurSegment++; - ALOGV ("Goto next seg loopCnt(%d) loopctr(%d), CurSeg(%d) \n", + ALOGV ("Goto next seg loopCnt(%d) loopctr(%d), CurSeg(%d)", lpToneDesc->segments[lpToneGen->mCurSegment].loopCnt, lpToneGen->mLoopCounter, lpToneGen->mCurSegment); @@ -1192,32 +1188,32 @@ void ToneGenerator::audioCallback(int event, void* user, void *info) { // Handle loop if last segment reached if (lpToneDesc->segments[lpToneGen->mCurSegment].duration == 0) { - ALOGV("Last Seg: %d\n", lpToneGen->mCurSegment); + ALOGV("Last Seg: %d", lpToneGen->mCurSegment); // Pre increment loop count and restart if total count not reached. Stop sequence otherwise if (++lpToneGen->mCurCount <= lpToneDesc->repeatCnt) { - ALOGV("Repeating Count: %d\n", lpToneGen->mCurCount); + ALOGV("Repeating Count: %d", lpToneGen->mCurCount); lpToneGen->mCurSegment = lpToneDesc->repeatSegment; if (lpToneDesc->segments[lpToneDesc->repeatSegment].waveFreq[0] != 0) { lWaveCmd = WaveGenerator::WAVEGEN_START; } - ALOGV("New segment %d, Next Time: %d\n", lpToneGen->mCurSegment, + ALOGV("New segment %d, Next Time: %d", lpToneGen->mCurSegment, (lpToneGen->mNextSegSmp*1000)/lpToneGen->mSamplingRate); } else { lGenSmp = 0; - ALOGV("End repeat, time: %d\n", (unsigned int)(systemTime()/1000000)); + ALOGV("End repeat, time: %d", (unsigned int)(systemTime()/1000000)); } } else { - ALOGV("New segment %d, Next Time: %d\n", lpToneGen->mCurSegment, + ALOGV("New segment %d, Next Time: %d", lpToneGen->mCurSegment, (lpToneGen->mNextSegSmp*1000)/lpToneGen->mSamplingRate); if (lpToneDesc->segments[lpToneGen->mCurSegment].waveFreq[0] != 0) { // If next segment is not silent, OFF -> ON 
transition : reset wave generator lWaveCmd = WaveGenerator::WAVEGEN_START; - ALOGV("OFF->ON, lGenSmp: %d, lReqSmp: %d\n", lGenSmp, lReqSmp); + ALOGV("OFF->ON, lGenSmp: %d, lReqSmp: %d", lGenSmp, lReqSmp); } else { lGenSmp = 0; } @@ -1255,13 +1251,13 @@ audioCallback_EndLoop: switch (lpToneGen->mState) { case TONE_RESTARTING: - ALOGV("Cbk restarting track\n"); + ALOGV("Cbk restarting track"); if (lpToneGen->prepareWave()) { lpToneGen->mState = TONE_STARTING; // must reload lpToneDesc as prepareWave() may change mpToneDesc lpToneDesc = lpToneGen->mpToneDesc; } else { - ALOGW("Cbk restarting prepareWave() failed\n"); + ALOGW("Cbk restarting prepareWave() failed"); lpToneGen->mState = TONE_IDLE; lpToneGen->mpAudioTrack->stop(); // Force loop exit @@ -1270,14 +1266,14 @@ audioCallback_EndLoop: lSignal = true; break; case TONE_STOPPING: - ALOGV("Cbk Stopping\n"); + ALOGV("Cbk Stopping"); lpToneGen->mState = TONE_STOPPED; // Force loop exit lNumSmp = 0; break; case TONE_STOPPED: lpToneGen->mState = TONE_INIT; - ALOGV("Cbk Stopped track\n"); + ALOGV("Cbk Stopped track"); lpToneGen->mpAudioTrack->stop(); // Force loop exit lNumSmp = 0; @@ -1285,7 +1281,7 @@ audioCallback_EndLoop: lSignal = true; break; case TONE_STARTING: - ALOGV("Cbk starting track\n"); + ALOGV("Cbk starting track"); lpToneGen->mState = TONE_PLAYING; lSignal = true; break; @@ -1321,7 +1317,7 @@ audioCallback_EndLoop: bool ToneGenerator::prepareWave() { unsigned int segmentIdx = 0; - if (!mpNewToneDesc) { + if (mpNewToneDesc == NULL) { return false; } @@ -1353,9 +1349,6 @@ bool ToneGenerator::prepareWave() { new ToneGenerator::WaveGenerator((unsigned short)mSamplingRate, frequency, TONEGEN_GAIN/lNumWaves); - if (lpWaveGen == 0) { - goto prepareWave_exit; - } mWaveGens.add(frequency, lpWaveGen); } frequency = mpNewToneDesc->segments[segmentIdx].waveFreq[++freqIdx]; @@ -1375,12 +1368,6 @@ bool ToneGenerator::prepareWave() { } return true; - -prepareWave_exit: - - clearWaveGens(); - - return false; } @@ 
-1447,13 +1434,13 @@ void ToneGenerator::clearWaveGens() { // none // //////////////////////////////////////////////////////////////////////////////// -int ToneGenerator::getToneForRegion(int toneType) { - int regionTone; +ToneGenerator::tone_type ToneGenerator::getToneForRegion(tone_type toneType) { + tone_type regionTone; if (mRegion == CEPT || toneType < FIRST_SUP_TONE || toneType > LAST_SUP_TONE) { regionTone = toneType; } else { - regionTone = sToneMappingTable[mRegion][toneType - FIRST_SUP_TONE]; + regionTone = (tone_type) sToneMappingTable[mRegion][toneType - FIRST_SUP_TONE]; } ALOGV("getToneForRegion, tone %d, region %d, regionTone %d", toneType, mRegion, regionTone); @@ -1504,7 +1491,7 @@ ToneGenerator::WaveGenerator::WaveGenerator(unsigned short samplingRate, d0 = 32767; mA1_Q14 = (short) d0; - ALOGV("WaveGenerator init, mA1_Q14: %d, mS2_0: %d, mAmplitude_Q15: %d\n", + ALOGV("WaveGenerator init, mA1_Q14: %d, mS2_0: %d, mAmplitude_Q15: %d", mA1_Q14, mS2_0, mAmplitude_Q15); } diff --git a/media/libmedia/Visualizer.cpp b/media/libmedia/Visualizer.cpp index d08ffa5..70f8c0c 100644 --- a/media/libmedia/Visualizer.cpp +++ b/media/libmedia/Visualizer.cpp @@ -27,8 +27,7 @@ #include <cutils/bitops.h> #include <media/Visualizer.h> - -extern void fixed_fft_real(int n, int32_t *v); +#include <audio_utils/fixedfft.h> namespace android { @@ -54,7 +53,7 @@ Visualizer::~Visualizer() status_t Visualizer::setEnabled(bool enabled) { - Mutex::Autolock _l(mLock); + Mutex::Autolock _l(mCaptureLock); sp<CaptureThread> t = mCaptureThread; if (t != 0) { @@ -74,7 +73,7 @@ status_t Visualizer::setEnabled(bool enabled) if (status == NO_ERROR) { if (t != 0) { if (enabled) { - t->run("AudioTrackThread"); + t->run("Visualizer"); } else { t->requestExit(); } @@ -93,7 +92,7 @@ status_t Visualizer::setCaptureCallBack(capture_cbk_t cbk, void* user, uint32_t if (rate > CAPTURE_RATE_MAX) { return BAD_VALUE; } - Mutex::Autolock _l(mLock); + Mutex::Autolock _l(mCaptureLock); if (mEnabled) { 
return INVALID_OPERATION; @@ -115,10 +114,6 @@ status_t Visualizer::setCaptureCallBack(capture_cbk_t cbk, void* user, uint32_t if (cbk != NULL) { mCaptureThread = new CaptureThread(*this, rate, ((flags & CAPTURE_CALL_JAVA) != 0)); - if (mCaptureThread == 0) { - ALOGE("Could not create callback thread"); - return NO_INIT; - } } ALOGV("setCaptureCallBack() rate: %d thread %p flags 0x%08x", rate, mCaptureThread.get(), mCaptureFlags); @@ -133,7 +128,7 @@ status_t Visualizer::setCaptureSize(uint32_t size) return BAD_VALUE; } - Mutex::Autolock _l(mLock); + Mutex::Autolock _l(mCaptureLock); if (mEnabled) { return INVALID_OPERATION; } @@ -173,7 +168,7 @@ status_t Visualizer::getWaveForm(uint8_t *waveform) uint32_t replySize = mCaptureSize; status = command(VISUALIZER_CMD_CAPTURE, 0, NULL, &replySize, waveform); ALOGV("getWaveForm() command returned %d", status); - if (replySize == 0) { + if ((status == NO_ERROR) && (replySize == 0)) { status = NOT_ENOUGH_DATA; } } else { @@ -235,7 +230,7 @@ status_t Visualizer::doFft(uint8_t *fft, uint8_t *waveform) void Visualizer::periodicCapture() { - Mutex::Autolock _l(mLock); + Mutex::Autolock _l(mCaptureLock); ALOGV("periodicCapture() %p mCaptureCallBack %p mCaptureFlags 0x%08x", this, mCaptureCallBack, mCaptureFlags); if (mCaptureCallBack != NULL && diff --git a/media/libmedia/autodetect.cpp b/media/libmedia/autodetect.cpp index dfcc6a0..be5c3b2 100644 --- a/media/libmedia/autodetect.cpp +++ b/media/libmedia/autodetect.cpp @@ -16,7 +16,7 @@ #include "autodetect.h" -typedef struct CharRange { +struct CharRange { uint16_t first; uint16_t last; }; diff --git a/media/libmedia/fixedfft.cpp b/media/libmedia/fixedfft.cpp deleted file mode 100644 index 2b495e6..0000000 --- a/media/libmedia/fixedfft.cpp +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Copyright (C) 2010 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* A Fixed point implementation of Fast Fourier Transform (FFT). Complex numbers - * are represented by 32-bit integers, where higher 16 bits are real part and - * lower ones are imaginary part. Few compromises are made between efficiency, - * accuracy, and maintainability. To make it fast, arithmetic shifts are used - * instead of divisions, and bitwise inverses are used instead of negates. To - * keep it small, only radix-2 Cooley-Tukey algorithm is implemented, and only - * half of the twiddle factors are stored. Although there are still ways to make - * it even faster or smaller, it costs too much on one of the aspects. 
- */ - -#include <stdio.h> -#include <stdint.h> -#ifdef __arm__ -#include <machine/cpu-features.h> -#endif - -#define LOG_FFT_SIZE 10 -#define MAX_FFT_SIZE (1 << LOG_FFT_SIZE) - -static const int32_t twiddle[MAX_FFT_SIZE / 4] = { - 0x00008000, 0xff378001, 0xfe6e8002, 0xfda58006, 0xfcdc800a, 0xfc13800f, - 0xfb4a8016, 0xfa81801e, 0xf9b88027, 0xf8ef8032, 0xf827803e, 0xf75e804b, - 0xf6958059, 0xf5cd8068, 0xf5058079, 0xf43c808b, 0xf374809e, 0xf2ac80b2, - 0xf1e480c8, 0xf11c80de, 0xf05580f6, 0xef8d8110, 0xeec6812a, 0xedff8146, - 0xed388163, 0xec718181, 0xebab81a0, 0xeae481c1, 0xea1e81e2, 0xe9588205, - 0xe892822a, 0xe7cd824f, 0xe7078276, 0xe642829d, 0xe57d82c6, 0xe4b982f1, - 0xe3f4831c, 0xe3308349, 0xe26d8377, 0xe1a983a6, 0xe0e683d6, 0xe0238407, - 0xdf61843a, 0xde9e846e, 0xdddc84a3, 0xdd1b84d9, 0xdc598511, 0xdb998549, - 0xdad88583, 0xda1885be, 0xd95885fa, 0xd8988637, 0xd7d98676, 0xd71b86b6, - 0xd65c86f6, 0xd59e8738, 0xd4e1877b, 0xd42487c0, 0xd3678805, 0xd2ab884c, - 0xd1ef8894, 0xd13488dd, 0xd0798927, 0xcfbe8972, 0xcf0489be, 0xce4b8a0c, - 0xcd928a5a, 0xccd98aaa, 0xcc218afb, 0xcb698b4d, 0xcab28ba0, 0xc9fc8bf5, - 0xc9468c4a, 0xc8908ca1, 0xc7db8cf8, 0xc7278d51, 0xc6738dab, 0xc5c08e06, - 0xc50d8e62, 0xc45b8ebf, 0xc3a98f1d, 0xc2f88f7d, 0xc2488fdd, 0xc198903e, - 0xc0e990a1, 0xc03a9105, 0xbf8c9169, 0xbedf91cf, 0xbe329236, 0xbd86929e, - 0xbcda9307, 0xbc2f9371, 0xbb8593dc, 0xbadc9448, 0xba3394b5, 0xb98b9523, - 0xb8e39592, 0xb83c9603, 0xb7969674, 0xb6f196e6, 0xb64c9759, 0xb5a897ce, - 0xb5059843, 0xb46298b9, 0xb3c09930, 0xb31f99a9, 0xb27f9a22, 0xb1df9a9c, - 0xb1409b17, 0xb0a29b94, 0xb0059c11, 0xaf689c8f, 0xaecc9d0e, 0xae319d8e, - 0xad979e0f, 0xacfd9e91, 0xac659f14, 0xabcd9f98, 0xab36a01c, 0xaaa0a0a2, - 0xaa0aa129, 0xa976a1b0, 0xa8e2a238, 0xa84fa2c2, 0xa7bda34c, 0xa72ca3d7, - 0xa69ca463, 0xa60ca4f0, 0xa57ea57e, 0xa4f0a60c, 0xa463a69c, 0xa3d7a72c, - 0xa34ca7bd, 0xa2c2a84f, 0xa238a8e2, 0xa1b0a976, 0xa129aa0a, 0xa0a2aaa0, - 0xa01cab36, 0x9f98abcd, 0x9f14ac65, 0x9e91acfd, 0x9e0fad97, 
0x9d8eae31, - 0x9d0eaecc, 0x9c8faf68, 0x9c11b005, 0x9b94b0a2, 0x9b17b140, 0x9a9cb1df, - 0x9a22b27f, 0x99a9b31f, 0x9930b3c0, 0x98b9b462, 0x9843b505, 0x97ceb5a8, - 0x9759b64c, 0x96e6b6f1, 0x9674b796, 0x9603b83c, 0x9592b8e3, 0x9523b98b, - 0x94b5ba33, 0x9448badc, 0x93dcbb85, 0x9371bc2f, 0x9307bcda, 0x929ebd86, - 0x9236be32, 0x91cfbedf, 0x9169bf8c, 0x9105c03a, 0x90a1c0e9, 0x903ec198, - 0x8fddc248, 0x8f7dc2f8, 0x8f1dc3a9, 0x8ebfc45b, 0x8e62c50d, 0x8e06c5c0, - 0x8dabc673, 0x8d51c727, 0x8cf8c7db, 0x8ca1c890, 0x8c4ac946, 0x8bf5c9fc, - 0x8ba0cab2, 0x8b4dcb69, 0x8afbcc21, 0x8aaaccd9, 0x8a5acd92, 0x8a0cce4b, - 0x89becf04, 0x8972cfbe, 0x8927d079, 0x88ddd134, 0x8894d1ef, 0x884cd2ab, - 0x8805d367, 0x87c0d424, 0x877bd4e1, 0x8738d59e, 0x86f6d65c, 0x86b6d71b, - 0x8676d7d9, 0x8637d898, 0x85fad958, 0x85beda18, 0x8583dad8, 0x8549db99, - 0x8511dc59, 0x84d9dd1b, 0x84a3dddc, 0x846ede9e, 0x843adf61, 0x8407e023, - 0x83d6e0e6, 0x83a6e1a9, 0x8377e26d, 0x8349e330, 0x831ce3f4, 0x82f1e4b9, - 0x82c6e57d, 0x829de642, 0x8276e707, 0x824fe7cd, 0x822ae892, 0x8205e958, - 0x81e2ea1e, 0x81c1eae4, 0x81a0ebab, 0x8181ec71, 0x8163ed38, 0x8146edff, - 0x812aeec6, 0x8110ef8d, 0x80f6f055, 0x80def11c, 0x80c8f1e4, 0x80b2f2ac, - 0x809ef374, 0x808bf43c, 0x8079f505, 0x8068f5cd, 0x8059f695, 0x804bf75e, - 0x803ef827, 0x8032f8ef, 0x8027f9b8, 0x801efa81, 0x8016fb4a, 0x800ffc13, - 0x800afcdc, 0x8006fda5, 0x8002fe6e, 0x8001ff37, -}; - -/* Returns the multiplication of \conj{a} and {b}. 
*/ -static inline int32_t mult(int32_t a, int32_t b) -{ -#if __ARM_ARCH__ >= 6 - int32_t t = b; - __asm__("smuad %0, %0, %1" : "+r" (t) : "r" (a)); - __asm__("smusdx %0, %0, %1" : "+r" (b) : "r" (a)); - __asm__("pkhtb %0, %0, %1, ASR #16" : "+r" (t) : "r" (b)); - return t; -#else - return (((a >> 16) * (b >> 16) + (int16_t)a * (int16_t)b) & ~0xFFFF) | - ((((a >> 16) * (int16_t)b - (int16_t)a * (b >> 16)) >> 16) & 0xFFFF); -#endif -} - -static inline int32_t half(int32_t a) -{ -#if __ARM_ARCH__ >= 6 - __asm__("shadd16 %0, %0, %1" : "+r" (a) : "r" (0)); - return a; -#else - return ((a >> 1) & ~0x8000) | (a & 0x8000); -#endif -} - -void fixed_fft(int n, int32_t *v) -{ - int scale = LOG_FFT_SIZE, i, p, r; - - for (r = 0, i = 1; i < n; ++i) { - for (p = n; !(p & r); p >>= 1, r ^= p); - if (i < r) { - int32_t t = v[i]; - v[i] = v[r]; - v[r] = t; - } - } - - for (p = 1; p < n; p <<= 1) { - --scale; - - for (i = 0; i < n; i += p << 1) { - int32_t x = half(v[i]); - int32_t y = half(v[i + p]); - v[i] = x + y; - v[i + p] = x - y; - } - - for (r = 1; r < p; ++r) { - int32_t w = MAX_FFT_SIZE / 4 - (r << scale); - i = w >> 31; - w = twiddle[(w ^ i) - i] ^ (i << 16); - for (i = r; i < n; i += p << 1) { - int32_t x = half(v[i]); - int32_t y = mult(w, v[i + p]); - v[i] = x - y; - v[i + p] = x + y; - } - } - } -} - -void fixed_fft_real(int n, int32_t *v) -{ - int scale = LOG_FFT_SIZE, m = n >> 1, i; - - fixed_fft(n, v); - for (i = 1; i <= n; i <<= 1, --scale); - v[0] = mult(~v[0], 0x80008000); - v[m] = half(v[m]); - - for (i = 1; i < n >> 1; ++i) { - int32_t x = half(v[i]); - int32_t z = half(v[n - i]); - int32_t y = z - (x ^ 0xFFFF); - x = half(x + (z ^ 0xFFFF)); - y = mult(y, twiddle[i << scale]); - v[i] = x - y; - v[n - i] = (x + y) ^ 0xFFFF; - } -} diff --git a/media/libmedia/mediametadataretriever.cpp b/media/libmedia/mediametadataretriever.cpp index 88e269f..8d53357 100644 --- a/media/libmedia/mediametadataretriever.cpp +++ b/media/libmedia/mediametadataretriever.cpp @@ -35,7 
+35,7 @@ sp<MediaMetadataRetriever::DeathNotifier> MediaMetadataRetriever::sDeathNotifier const sp<IMediaPlayerService>& MediaMetadataRetriever::getService() { Mutex::Autolock lock(sServiceLock); - if (sService.get() == 0) { + if (sService == 0) { sp<IServiceManager> sm = defaultServiceManager(); sp<IBinder> binder; do { diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp index 2284927..4ff1862 100644 --- a/media/libmedia/mediaplayer.cpp +++ b/media/libmedia/mediaplayer.cpp @@ -1,4 +1,4 @@ -/* mediaplayer.cpp +/* ** ** Copyright 2006, The Android Open Source Project ** @@ -30,9 +30,7 @@ #include <gui/SurfaceTextureClient.h> #include <media/mediaplayer.h> -#include <media/AudioTrack.h> - -#include <surfaceflinger/Surface.h> +#include <media/AudioSystem.h> #include <binder/MemoryBase.h> @@ -63,6 +61,7 @@ MediaPlayer::MediaPlayer() mAudioSessionId = AudioSystem::newAudioSessionId(); AudioSystem::acquireAudioSessionId(mAudioSessionId); mSendLevel = 0; + mRetransmitEndpointValid = false; } MediaPlayer::~MediaPlayer() @@ -95,6 +94,7 @@ void MediaPlayer::clear_l() mCurrentPosition = -1; mSeekPosition = -1; mVideoWidth = mVideoHeight = 0; + mRetransmitEndpointValid = false; } status_t MediaPlayer::setListener(const sp<MediaPlayerListener>& listener) @@ -146,7 +146,8 @@ status_t MediaPlayer::setDataSource( const sp<IMediaPlayerService>& service(getMediaPlayerService()); if (service != 0) { sp<IMediaPlayer> player(service->create(getpid(), this, mAudioSessionId)); - if (NO_ERROR != player->setDataSource(url, headers)) { + if ((NO_ERROR != doSetRetransmitEndpoint(player)) || + (NO_ERROR != player->setDataSource(url, headers))) { player.clear(); } err = attachNewPlayer(player); @@ -162,7 +163,8 @@ status_t MediaPlayer::setDataSource(int fd, int64_t offset, int64_t length) const sp<IMediaPlayerService>& service(getMediaPlayerService()); if (service != 0) { sp<IMediaPlayer> player(service->create(getpid(), this, mAudioSessionId)); - if (NO_ERROR != 
player->setDataSource(fd, offset, length)) { + if ((NO_ERROR != doSetRetransmitEndpoint(player)) || + (NO_ERROR != player->setDataSource(fd, offset, length))) { player.clear(); } err = attachNewPlayer(player); @@ -177,7 +179,8 @@ status_t MediaPlayer::setDataSource(const sp<IStreamSource> &source) const sp<IMediaPlayerService>& service(getMediaPlayerService()); if (service != 0) { sp<IMediaPlayer> player(service->create(getpid(), this, mAudioSessionId)); - if (NO_ERROR != player->setDataSource(source)) { + if ((NO_ERROR != doSetRetransmitEndpoint(player)) || + (NO_ERROR != player->setDataSource(source))) { player.clear(); } err = attachNewPlayer(player); @@ -471,6 +474,20 @@ status_t MediaPlayer::reset_l() return NO_ERROR; } +status_t MediaPlayer::doSetRetransmitEndpoint(const sp<IMediaPlayer>& player) { + Mutex::Autolock _l(mLock); + + if (player == NULL) { + return UNKNOWN_ERROR; + } + + if (mRetransmitEndpointValid) { + return player->setRetransmitEndpoint(&mRetransmitEndpoint); + } + + return OK; +} + status_t MediaPlayer::reset() { ALOGV("reset"); @@ -478,7 +495,7 @@ status_t MediaPlayer::reset() return reset_l(); } -status_t MediaPlayer::setAudioStreamType(int type) +status_t MediaPlayer::setAudioStreamType(audio_stream_type_t type) { ALOGV("MediaPlayer::setAudioStreamType"); Mutex::Autolock _l(mLock); @@ -599,6 +616,34 @@ status_t MediaPlayer::getParameter(int key, Parcel *reply) return INVALID_OPERATION; } +status_t MediaPlayer::setRetransmitEndpoint(const char* addrString, + uint16_t port) { + ALOGV("MediaPlayer::setRetransmitEndpoint(%s:%hu)", + addrString ? 
addrString : "(null)", port); + + Mutex::Autolock _l(mLock); + if ((mPlayer != NULL) || (mCurrentState != MEDIA_PLAYER_IDLE)) + return INVALID_OPERATION; + + if (NULL == addrString) { + mRetransmitEndpointValid = false; + return OK; + } + + struct in_addr saddr; + if(!inet_aton(addrString, &saddr)) { + return BAD_VALUE; + } + + memset(&mRetransmitEndpoint, 0, sizeof(&mRetransmitEndpoint)); + mRetransmitEndpoint.sin_family = AF_INET; + mRetransmitEndpoint.sin_addr = saddr; + mRetransmitEndpoint.sin_port = htons(port); + mRetransmitEndpointValid = true; + + return OK; +} + void MediaPlayer::notify(int msg, int ext1, int ext2, const Parcel *obj) { ALOGV("message received msg=%d, ext1=%d, ext2=%d", msg, ext1, ext2); @@ -709,7 +754,7 @@ void MediaPlayer::notify(int msg, int ext1, int ext2, const Parcel *obj) } } -/*static*/ sp<IMemory> MediaPlayer::decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, int* pFormat) +/*static*/ sp<IMemory> MediaPlayer::decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat) { ALOGV("decode(%s)", url); sp<IMemory> p; @@ -729,7 +774,7 @@ void MediaPlayer::died() notify(MEDIA_ERROR, MEDIA_ERROR_SERVER_DIED, 0); } -/*static*/ sp<IMemory> MediaPlayer::decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, int* pFormat) +/*static*/ sp<IMemory> MediaPlayer::decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat) { ALOGV("decode(%d, %lld, %lld)", fd, offset, length); sp<IMemory> p; diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp index 8d947d8..cc73014 100644 --- a/media/libmedia/mediarecorder.cpp +++ b/media/libmedia/mediarecorder.cpp @@ -18,7 +18,6 @@ //#define LOG_NDEBUG 0 #define LOG_TAG "MediaRecorder" #include <utils/Log.h> -#include <surfaceflinger/Surface.h> #include <media/mediarecorder.h> #include <binder/IServiceManager.h> #include <utils/String8.h> diff --git 
a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk index a3e2517..e521648 100644 --- a/media/libmediaplayerservice/Android.mk +++ b/media/libmediaplayerservice/Android.mk @@ -29,7 +29,8 @@ LOCAL_SHARED_LIBRARIES := \ libstagefright_omx \ libstagefright_foundation \ libgui \ - libdl + libdl \ + libaah_rtp LOCAL_STATIC_LIBRARIES := \ libstagefright_nuplayer \ diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp index f5cb019..1a85c9c 100644 --- a/media/libmediaplayerservice/MediaPlayerService.cpp +++ b/media/libmediaplayerservice/MediaPlayerService.cpp @@ -70,6 +70,11 @@ #include <OMX.h> +namespace android { +sp<MediaPlayerBase> createAAH_TXPlayer(); +sp<MediaPlayerBase> createAAH_RXPlayer(); +} + namespace { using android::media::Metadata; using android::status_t; @@ -320,7 +325,7 @@ status_t MediaPlayerService::AudioOutput::dump(int fd, const Vector<String16>& a mStreamType, mLeftVolume, mRightVolume); result.append(buffer); snprintf(buffer, 255, " msec per frame(%f), latency (%d)\n", - mMsecsPerFrame, mLatency); + mMsecsPerFrame, (mTrack != 0) ? 
mTrack->latency() : -1); result.append(buffer); snprintf(buffer, 255, " aux effect id(%d), send level (%f)\n", mAuxEffectId, mSendLevel); @@ -487,6 +492,7 @@ MediaPlayerService::Client::Client( mStatus = NO_INIT; mAudioSessionId = audioSessionId; mUID = uid; + mRetransmitEndpointValid = false; #if CALLBACK_ANTAGONIZER ALOGD("create Antagonizer"); @@ -593,6 +599,10 @@ player_type getPlayerType(const char* url) return NU_PLAYER; } + if (!strncasecmp("aahRX://", url, 8)) { + return AAH_RX_PLAYER; + } + // use MidiFile for MIDI extensions int lenURL = strlen(url); for (int i = 0; i < NELEM(FILE_EXTS); ++i) { @@ -608,6 +618,44 @@ player_type getPlayerType(const char* url) return getDefaultPlayerType(); } +player_type MediaPlayerService::Client::getPlayerType(int fd, + int64_t offset, + int64_t length) +{ + // Until re-transmit functionality is added to the existing core android + // players, we use the special AAH TX player whenever we were configured + // for retransmission. + if (mRetransmitEndpointValid) { + return AAH_TX_PLAYER; + } + + return android::getPlayerType(fd, offset, length); +} + +player_type MediaPlayerService::Client::getPlayerType(const char* url) +{ + // Until re-transmit functionality is added to the existing core android + // players, we use the special AAH TX player whenever we were configured + // for retransmission. + if (mRetransmitEndpointValid) { + return AAH_TX_PLAYER; + } + + return android::getPlayerType(url); +} + +player_type MediaPlayerService::Client::getPlayerType( + const sp<IStreamSource> &source) { + // Until re-transmit functionality is added to the existing core android + // players, we use the special AAH TX player whenever we were configured + // for retransmission. 
+ if (mRetransmitEndpointValid) { + return AAH_TX_PLAYER; + } + + return NU_PLAYER; +} + static sp<MediaPlayerBase> createPlayer(player_type playerType, void* cookie, notify_callback_f notifyFunc) { @@ -629,6 +677,14 @@ static sp<MediaPlayerBase> createPlayer(player_type playerType, void* cookie, ALOGV("Create Test Player stub"); p = new TestPlayerStub(); break; + case AAH_RX_PLAYER: + ALOGV(" create A@H RX Player"); + p = createAAH_RXPlayer(); + break; + case AAH_TX_PLAYER: + ALOGV(" create A@H TX Player"); + p = createAAH_TXPlayer(); + break; default: ALOGE("Unknown player type: %d", playerType); return NULL; @@ -665,6 +721,49 @@ sp<MediaPlayerBase> MediaPlayerService::Client::createPlayer(player_type playerT return p; } +sp<MediaPlayerBase> MediaPlayerService::Client::setDataSource_pre( + player_type playerType) +{ + ALOGV("player type = %d", playerType); + + // create the right type of player + sp<MediaPlayerBase> p = createPlayer(playerType); + if (p == NULL) { + return p; + } + + if (!p->hardwareOutput()) { + mAudioOutput = new AudioOutput(mAudioSessionId); + static_cast<MediaPlayerInterface*>(p.get())->setAudioSink(mAudioOutput); + } + + return p; +} + +void MediaPlayerService::Client::setDataSource_post( + const sp<MediaPlayerBase>& p, + status_t status) +{ + ALOGV(" setDataSource"); + mStatus = status; + if (mStatus != OK) { + ALOGE(" error: %d", mStatus); + return; + } + + // Set the re-transmission endpoint if one was chosen. 
+ if (mRetransmitEndpointValid) { + mStatus = p->setRetransmitEndpoint(&mRetransmitEndpoint); + if (mStatus != NO_ERROR) { + ALOGE("setRetransmitEndpoint error: %d", mStatus); + } + } + + if (mStatus == OK) { + mPlayer = p; + } +} + status_t MediaPlayerService::Client::setDataSource( const char *url, const KeyedVector<String8, String8> *headers) { @@ -696,25 +795,12 @@ status_t MediaPlayerService::Client::setDataSource( return mStatus; } else { player_type playerType = getPlayerType(url); - ALOGV("player type = %d", playerType); - - // create the right type of player - sp<MediaPlayerBase> p = createPlayer(playerType); - if (p == NULL) return NO_INIT; - - if (!p->hardwareOutput()) { - mAudioOutput = new AudioOutput(mAudioSessionId); - static_cast<MediaPlayerInterface*>(p.get())->setAudioSink(mAudioOutput); + sp<MediaPlayerBase> p = setDataSource_pre(playerType); + if (p == NULL) { + return NO_INIT; } - // now set data source - ALOGV(" setDataSource"); - mStatus = p->setDataSource(url, headers); - if (mStatus == NO_ERROR) { - mPlayer = p; - } else { - ALOGE(" error: %d", mStatus); - } + setDataSource_post(p, p->setDataSource(url, headers)); return mStatus; } } @@ -745,46 +831,34 @@ status_t MediaPlayerService::Client::setDataSource(int fd, int64_t offset, int64 ALOGV("calculated length = %lld", length); } + // Until re-transmit functionality is added to the existing core android + // players, we use the special AAH TX player whenever we were configured for + // retransmission. 
player_type playerType = getPlayerType(fd, offset, length); - ALOGV("player type = %d", playerType); - - // create the right type of player - sp<MediaPlayerBase> p = createPlayer(playerType); - if (p == NULL) return NO_INIT; - - if (!p->hardwareOutput()) { - mAudioOutput = new AudioOutput(mAudioSessionId); - static_cast<MediaPlayerInterface*>(p.get())->setAudioSink(mAudioOutput); + sp<MediaPlayerBase> p = setDataSource_pre(playerType); + if (p == NULL) { + return NO_INIT; } // now set data source - mStatus = p->setDataSource(fd, offset, length); - if (mStatus == NO_ERROR) mPlayer = p; - + setDataSource_post(p, p->setDataSource(fd, offset, length)); return mStatus; } status_t MediaPlayerService::Client::setDataSource( const sp<IStreamSource> &source) { // create the right type of player - sp<MediaPlayerBase> p = createPlayer(NU_PLAYER); - + // Until re-transmit functionality is added to the existing core android + // players, we use the special AAH TX player whenever we were configured for + // retransmission. 
+ player_type playerType = getPlayerType(source); + sp<MediaPlayerBase> p = setDataSource_pre(playerType); if (p == NULL) { return NO_INIT; } - if (!p->hardwareOutput()) { - mAudioOutput = new AudioOutput(mAudioSessionId); - static_cast<MediaPlayerInterface*>(p.get())->setAudioSink(mAudioOutput); - } - // now set data source - mStatus = p->setDataSource(source); - - if (mStatus == OK) { - mPlayer = p; - } - + setDataSource_post(p, p->setDataSource(source)); return mStatus; } @@ -1005,12 +1079,13 @@ status_t MediaPlayerService::Client::seekTo(int msec) status_t MediaPlayerService::Client::reset() { ALOGV("[%d] reset", mConnId); + mRetransmitEndpointValid = false; sp<MediaPlayerBase> p = getPlayer(); if (p == 0) return UNKNOWN_ERROR; return p->reset(); } -status_t MediaPlayerService::Client::setAudioStreamType(int type) +status_t MediaPlayerService::Client::setAudioStreamType(audio_stream_type_t type) { ALOGV("[%d] setAudioStreamType(%d)", mConnId, type); // TODO: for hardware output, call player instead @@ -1031,9 +1106,21 @@ status_t MediaPlayerService::Client::setLooping(int loop) status_t MediaPlayerService::Client::setVolume(float leftVolume, float rightVolume) { ALOGV("[%d] setVolume(%f, %f)", mConnId, leftVolume, rightVolume); - // TODO: for hardware output, call player instead - Mutex::Autolock l(mLock); - if (mAudioOutput != 0) mAudioOutput->setVolume(leftVolume, rightVolume); + + // for hardware output, call player instead + sp<MediaPlayerBase> p = getPlayer(); + { + Mutex::Autolock l(mLock); + if (p != 0 && p->hardwareOutput()) { + MediaPlayerHWInterface* hwp = + reinterpret_cast<MediaPlayerHWInterface*>(p.get()); + return hwp->setVolume(leftVolume, rightVolume); + } else { + if (mAudioOutput != 0) mAudioOutput->setVolume(leftVolume, rightVolume); + return NO_ERROR; + } + } + return NO_ERROR; } @@ -1067,6 +1154,36 @@ status_t MediaPlayerService::Client::getParameter(int key, Parcel *reply) { return p->getParameter(key, reply); } +status_t 
MediaPlayerService::Client::setRetransmitEndpoint( + const struct sockaddr_in* endpoint) { + + if (NULL != endpoint) { + uint32_t a = ntohl(endpoint->sin_addr.s_addr); + uint16_t p = ntohs(endpoint->sin_port); + ALOGV("[%d] setRetransmitEndpoint(%u.%u.%u.%u:%hu)", mConnId, + (a >> 24), (a >> 16) & 0xFF, (a >> 8) & 0xFF, (a & 0xFF), p); + } else { + ALOGV("[%d] setRetransmitEndpoint = <none>", mConnId); + } + + sp<MediaPlayerBase> p = getPlayer(); + + // Right now, the only valid time to set a retransmit endpoint is before + // player selection has been made (since the presence or absence of a + // retransmit endpoint is going to determine which player is selected during + // setDataSource). + if (p != 0) return INVALID_OPERATION; + + if (NULL != endpoint) { + mRetransmitEndpoint = *endpoint; + mRetransmitEndpointValid = true; + } else { + mRetransmitEndpointValid = false; + } + + return NO_ERROR; +} + void MediaPlayerService::Client::notify( void* cookie, int msg, int ext1, int ext2, const Parcel *obj) { @@ -1149,7 +1266,7 @@ int Antagonizer::callbackThread(void* user) static size_t kDefaultHeapSize = 1024 * 1024; // 1MB -sp<IMemory> MediaPlayerService::decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, int* pFormat) +sp<IMemory> MediaPlayerService::decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat) { ALOGV("decode(%s)", url); sp<MemoryBase> mem; @@ -1197,7 +1314,7 @@ sp<IMemory> MediaPlayerService::decode(const char* url, uint32_t *pSampleRate, i mem = new MemoryBase(cache->getHeap(), 0, cache->size()); *pSampleRate = cache->sampleRate(); *pNumChannels = cache->channelCount(); - *pFormat = (int)cache->format(); + *pFormat = cache->format(); ALOGV("return memory @ %p, sampleRate=%u, channelCount = %d, format = %d", mem->pointer(), *pSampleRate, *pNumChannels, *pFormat); Exit: @@ -1205,7 +1322,7 @@ Exit: return mem; } -sp<IMemory> MediaPlayerService::decode(int fd, int64_t offset, int64_t length, uint32_t 
*pSampleRate, int* pNumChannels, int* pFormat) +sp<IMemory> MediaPlayerService::decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat) { ALOGV("decode(%d, %lld, %lld)", fd, offset, length); sp<MemoryBase> mem; @@ -1265,7 +1382,8 @@ MediaPlayerService::AudioOutput::AudioOutput(int sessionId) mStreamType = AUDIO_STREAM_MUSIC; mLeftVolume = 1.0; mRightVolume = 1.0; - mLatency = 0; + mPlaybackRatePermille = 1000; + mSampleRateHz = 0; mMsecsPerFrame = 0; mAuxEffectId = 0; mSendLevel = 0.0; @@ -1324,7 +1442,8 @@ ssize_t MediaPlayerService::AudioOutput::frameSize() const uint32_t MediaPlayerService::AudioOutput::latency () const { - return mLatency; + if (mTrack == 0) return 0; + return mTrack->latency(); } float MediaPlayerService::AudioOutput::msecsPerFrame() const @@ -1339,7 +1458,8 @@ status_t MediaPlayerService::AudioOutput::getPosition(uint32_t *position) } status_t MediaPlayerService::AudioOutput::open( - uint32_t sampleRate, int channelCount, int format, int bufferCount, + uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask, + audio_format_t format, int bufferCount, AudioCallback cb, void *cookie) { mCallback = cb; @@ -1351,7 +1471,8 @@ status_t MediaPlayerService::AudioOutput::open( bufferCount = mMinBufferCount; } - ALOGV("open(%u, %d, %d, %d, %d)", sampleRate, channelCount, format, bufferCount,mSessionId); + ALOGV("open(%u, %d, 0x%x, %d, %d, %d)", sampleRate, channelCount, channelMask, + format, bufferCount, mSessionId); if (mTrack) close(); int afSampleRate; int afFrameCount; @@ -1366,13 +1487,21 @@ status_t MediaPlayerService::AudioOutput::open( frameCount = (sampleRate*afFrameCount*bufferCount)/afSampleRate; + if (channelMask == CHANNEL_MASK_USE_CHANNEL_ORDER) { + channelMask = audio_channel_mask_from_count(channelCount); + if (0 == channelMask) { + ALOGE("open() error, can\'t derive mask for %d audio channels", channelCount); + return NO_INIT; + } + } + AudioTrack *t; if 
(mCallback != NULL) { t = new AudioTrack( mStreamType, sampleRate, format, - (channelCount == 2) ? AUDIO_CHANNEL_OUT_STEREO : AUDIO_CHANNEL_OUT_MONO, + channelMask, frameCount, 0 /* flags */, CallbackWrapper, @@ -1384,7 +1513,7 @@ status_t MediaPlayerService::AudioOutput::open( mStreamType, sampleRate, format, - (channelCount == 2) ? AUDIO_CHANNEL_OUT_STEREO : AUDIO_CHANNEL_OUT_MONO, + channelMask, frameCount, 0, NULL, @@ -1402,10 +1531,14 @@ status_t MediaPlayerService::AudioOutput::open( ALOGV("setVolume"); t->setVolume(mLeftVolume, mRightVolume); - mMsecsPerFrame = 1.e3 / (float) sampleRate; - mLatency = t->latency(); + mSampleRateHz = sampleRate; + mMsecsPerFrame = mPlaybackRatePermille / (float) sampleRate; mTrack = t; + status_t res = t->setSampleRate(mPlaybackRatePermille * mSampleRateHz / 1000); + if (res != NO_ERROR) { + return res; + } t->setAuxEffectSendLevel(mSendLevel); return t->attachAuxEffect(mAuxEffectId);; } @@ -1469,6 +1602,22 @@ void MediaPlayerService::AudioOutput::setVolume(float left, float right) } } +status_t MediaPlayerService::AudioOutput::setPlaybackRatePermille(int32_t ratePermille) +{ + ALOGV("setPlaybackRatePermille(%d)", ratePermille); + status_t res = NO_ERROR; + if (mTrack) { + res = mTrack->setSampleRate(ratePermille * mSampleRateHz / 1000); + } else { + res = NO_INIT; + } + mPlaybackRatePermille = ratePermille; + if (mSampleRateHz != 0) { + mMsecsPerFrame = mPlaybackRatePermille / (float) mSampleRateHz; + } + return res; +} + status_t MediaPlayerService::AudioOutput::setAuxEffectSendLevel(float level) { ALOGV("setAuxEffectSendLevel(%f)", level); @@ -1611,17 +1760,18 @@ bool CallbackThread::threadLoop() { //////////////////////////////////////////////////////////////////////////////// status_t MediaPlayerService::AudioCache::open( - uint32_t sampleRate, int channelCount, int format, int bufferCount, + uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask, + audio_format_t format, int bufferCount, AudioCallback 
cb, void *cookie) { - ALOGV("open(%u, %d, %d, %d)", sampleRate, channelCount, format, bufferCount); + ALOGV("open(%u, %d, 0x%x, %d, %d)", sampleRate, channelCount, channelMask, format, bufferCount); if (mHeap->getHeapID() < 0) { return NO_INIT; } mSampleRate = sampleRate; mChannelCount = (uint16_t)channelCount; - mFormat = (uint16_t)format; + mFormat = format; mMsecsPerFrame = 1.e3 / (float) sampleRate; if (cb != NULL) { diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h index 04d9e28..85cec22 100644 --- a/media/libmediaplayerservice/MediaPlayerService.h +++ b/media/libmediaplayerservice/MediaPlayerService.h @@ -18,6 +18,8 @@ #ifndef ANDROID_MEDIAPLAYERSERVICE_H #define ANDROID_MEDIAPLAYERSERVICE_H +#include <arpa/inet.h> + #include <utils/Log.h> #include <utils/threads.h> #include <utils/List.h> @@ -34,6 +36,7 @@ namespace android { +class AudioTrack; class IMediaRecorder; class IMediaMetadataRetriever; class IOMX; @@ -82,8 +85,8 @@ class MediaPlayerService : public BnMediaPlayerService virtual int getSessionId(); virtual status_t open( - uint32_t sampleRate, int channelCount, - int format, int bufferCount, + uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask, + audio_format_t format, int bufferCount, AudioCallback cb, void *cookie); virtual void start(); @@ -92,8 +95,9 @@ class MediaPlayerService : public BnMediaPlayerService virtual void flush(); virtual void pause(); virtual void close(); - void setAudioStreamType(int streamType) { mStreamType = streamType; } + void setAudioStreamType(audio_stream_type_t streamType) { mStreamType = streamType; } void setVolume(float left, float right); + virtual status_t setPlaybackRatePermille(int32_t ratePermille); status_t setAuxEffectSendLevel(float level); status_t attachAuxEffect(int effectId); virtual status_t dump(int fd, const Vector<String16>& args) const; @@ -108,11 +112,12 @@ class MediaPlayerService : public BnMediaPlayerService 
AudioTrack* mTrack; AudioCallback mCallback; void * mCallbackCookie; - int mStreamType; + audio_stream_type_t mStreamType; float mLeftVolume; float mRightVolume; + int32_t mPlaybackRatePermille; + uint32_t mSampleRateHz; // sample rate of the content, as set in open() float mMsecsPerFrame; - uint32_t mLatency; int mSessionId; float mSendLevel; int mAuxEffectId; @@ -139,8 +144,8 @@ class MediaPlayerService : public BnMediaPlayerService virtual int getSessionId(); virtual status_t open( - uint32_t sampleRate, int channelCount, int format, - int bufferCount = 1, + uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask, + audio_format_t format, int bufferCount = 1, AudioCallback cb = NULL, void *cookie = NULL); virtual void start(); @@ -149,10 +154,11 @@ class MediaPlayerService : public BnMediaPlayerService virtual void flush() {} virtual void pause() {} virtual void close() {} - void setAudioStreamType(int streamType) {} + void setAudioStreamType(audio_stream_type_t streamType) {} void setVolume(float left, float right) {} + virtual status_t setPlaybackRatePermille(int32_t ratePermille) { return INVALID_OPERATION; } uint32_t sampleRate() const { return mSampleRate; } - uint32_t format() const { return (uint32_t)mFormat; } + audio_format_t format() const { return mFormat; } size_t size() const { return mSize; } status_t wait(); @@ -170,7 +176,7 @@ class MediaPlayerService : public BnMediaPlayerService sp<MemoryHeapBase> mHeap; float mMsecsPerFrame; uint16_t mChannelCount; - uint16_t mFormat; + audio_format_t mFormat; ssize_t mFrameCount; uint32_t mSampleRate; uint32_t mSize; @@ -190,8 +196,8 @@ public: virtual sp<IMediaPlayer> create(pid_t pid, const sp<IMediaPlayerClient>& client, int audioSessionId); - virtual sp<IMemory> decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, int* pFormat); - virtual sp<IMemory> decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, int* pFormat); + virtual sp<IMemory> 
decode(const char* url, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat); + virtual sp<IMemory> decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, audio_format_t* pFormat); virtual sp<IOMX> getOMX(); virtual status_t dump(int fd, const Vector<String16>& args); @@ -259,7 +265,7 @@ private: virtual status_t getCurrentPosition(int* msec); virtual status_t getDuration(int* msec); virtual status_t reset(); - virtual status_t setAudioStreamType(int type); + virtual status_t setAudioStreamType(audio_stream_type_t type); virtual status_t setLooping(int loop); virtual status_t setVolume(float leftVolume, float rightVolume); virtual status_t invoke(const Parcel& request, Parcel *reply); @@ -271,6 +277,7 @@ private: virtual status_t attachAuxEffect(int effectId); virtual status_t setParameter(int key, const Parcel &request); virtual status_t getParameter(int key, Parcel *reply); + virtual status_t setRetransmitEndpoint(const struct sockaddr_in* endpoint); sp<MediaPlayerBase> createPlayer(player_type playerType); @@ -282,6 +289,14 @@ private: virtual status_t setDataSource(const sp<IStreamSource> &source); + sp<MediaPlayerBase> setDataSource_pre(player_type playerType); + void setDataSource_post(const sp<MediaPlayerBase>& p, + status_t status); + + player_type getPlayerType(int fd, int64_t offset, int64_t length); + player_type getPlayerType(const char* url); + player_type getPlayerType(const sp<IStreamSource> &source); + static void notify(void* cookie, int msg, int ext1, int ext2, const Parcel *obj); @@ -333,6 +348,8 @@ private: uid_t mUID; sp<ANativeWindow> mConnectedWindow; sp<IBinder> mConnectedWindowBinder; + struct sockaddr_in mRetransmitEndpoint; + bool mRetransmitEndpointValid; // Metadata filters. 
media::Metadata::Filter mMetadataAllow; // protected by mLock diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp index d219fc2..beda945 100644 --- a/media/libmediaplayerservice/MediaRecorderClient.cpp +++ b/media/libmediaplayerservice/MediaRecorderClient.cpp @@ -33,8 +33,6 @@ #include <utils/String16.h> -#include <media/AudioTrack.h> - #include <system/audio.h> #include "MediaRecorderClient.h" diff --git a/media/libmediaplayerservice/MidiFile.cpp b/media/libmediaplayerservice/MidiFile.cpp index d89b5f4..8db5b9b 100644 --- a/media/libmediaplayerservice/MidiFile.cpp +++ b/media/libmediaplayerservice/MidiFile.cpp @@ -86,7 +86,8 @@ MidiFile::MidiFile() : // create playback thread { Mutex::Autolock l(mMutex); - createThreadEtc(renderThread, this, "midithread", ANDROID_PRIORITY_AUDIO); + mThread = new MidiFileThread(this); + mThread->run("midithread", ANDROID_PRIORITY_AUDIO); mCondition.wait(mMutex); ALOGV("thread started"); } @@ -420,18 +421,14 @@ status_t MidiFile::setLooping(int loop) } status_t MidiFile::createOutputTrack() { - if (mAudioSink->open(pLibConfig->sampleRate, pLibConfig->numChannels, AUDIO_FORMAT_PCM_16_BIT, 2) != NO_ERROR) { + if (mAudioSink->open(pLibConfig->sampleRate, pLibConfig->numChannels, + CHANNEL_MASK_USE_CHANNEL_ORDER, AUDIO_FORMAT_PCM_16_BIT, 2) != NO_ERROR) { ALOGE("mAudioSink open failed"); return ERROR_OPEN_FAILED; } return NO_ERROR; } -int MidiFile::renderThread(void* p) { - - return ((MidiFile*)p)->render(); -} - int MidiFile::render() { EAS_RESULT result = EAS_FAILURE; EAS_I32 count; diff --git a/media/libmediaplayerservice/MidiFile.h b/media/libmediaplayerservice/MidiFile.h index 3469389..f6f8f7b 100644 --- a/media/libmediaplayerservice/MidiFile.h +++ b/media/libmediaplayerservice/MidiFile.h @@ -19,11 +19,11 @@ #define ANDROID_MIDIFILE_H #include <media/MediaPlayerInterface.h> -#include <media/AudioTrack.h> #include <libsonivox/eas.h> namespace android { +// Note that 
the name MidiFile is misleading; this actually represents a MIDI file player class MidiFile : public MediaPlayerInterface { public: MidiFile(); @@ -65,7 +65,6 @@ public: private: status_t createOutputTrack(); status_t reset_nosync(); - static int renderThread(void*); int render(); void updateState(){ EAS_State(mEasData, mEasHandle, &mState); } @@ -78,12 +77,35 @@ private: EAS_I32 mDuration; EAS_STATE mState; EAS_FILE mFileLocator; - int mStreamType; + audio_stream_type_t mStreamType; bool mLoop; volatile bool mExit; bool mPaused; volatile bool mRender; pid_t mTid; + + class MidiFileThread : public Thread { + public: + MidiFileThread(MidiFile *midiPlayer) : mMidiFile(midiPlayer) { + } + + protected: + virtual ~MidiFileThread() {} + + private: + MidiFile *mMidiFile; + + bool threadLoop() { + int result; + result = mMidiFile->render(); + return false; + } + + MidiFileThread(const MidiFileThread &); + MidiFileThread &operator=(const MidiFileThread &); + }; + + sp<MidiFileThread> mThread; }; }; // namespace android diff --git a/media/libmediaplayerservice/StagefrightPlayer.cpp b/media/libmediaplayerservice/StagefrightPlayer.cpp index 6d7771a..052ebf0 100644 --- a/media/libmediaplayerservice/StagefrightPlayer.cpp +++ b/media/libmediaplayerservice/StagefrightPlayer.cpp @@ -176,7 +176,7 @@ void StagefrightPlayer::setAudioSink(const sp<AudioSink> &audioSink) { } status_t StagefrightPlayer::setParameter(int key, const Parcel &request) { - ALOGV("setParameter"); + ALOGV("setParameter(key=%d)", key); return mPlayer->setParameter(key, request); } diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp index 4632016..ca79657 100644 --- a/media/libmediaplayerservice/StagefrightRecorder.cpp +++ b/media/libmediaplayerservice/StagefrightRecorder.cpp @@ -24,6 +24,7 @@ #include <binder/IServiceManager.h> #include <media/IMediaPlayerService.h> +#include <media/stagefright/foundation/ADebug.h> #include 
<media/stagefright/AudioSource.h> #include <media/stagefright/AMRWriter.h> #include <media/stagefright/AACWriter.h> @@ -31,7 +32,6 @@ #include <media/stagefright/CameraSourceTimeLapse.h> #include <media/stagefright/MPEG2TSWriter.h> #include <media/stagefright/MPEG4Writer.h> -#include <media/stagefright/MediaDebug.h> #include <media/stagefright/MediaDefs.h> #include <media/stagefright/MetaData.h> #include <media/stagefright/OMXClient.h> @@ -40,7 +40,7 @@ #include <media/MediaProfiles.h> #include <camera/ICamera.h> #include <camera/CameraParameters.h> -#include <surfaceflinger/Surface.h> +#include <gui/Surface.h> #include <utils/Errors.h> #include <sys/types.h> @@ -241,8 +241,8 @@ status_t StagefrightRecorder::setOutputFile(const char *path) { status_t StagefrightRecorder::setOutputFile(int fd, int64_t offset, int64_t length) { ALOGV("setOutputFile: %d, %lld, %lld", fd, offset, length); // These don't make any sense, do they? - CHECK_EQ(offset, 0); - CHECK_EQ(length, 0); + CHECK_EQ(offset, 0ll); + CHECK_EQ(length, 0ll); if (fd < 0) { ALOGE("Invalid file descriptor: %d", fd); @@ -734,7 +734,7 @@ status_t StagefrightRecorder::prepare() { } status_t StagefrightRecorder::start() { - CHECK(mOutputFd >= 0); + CHECK_GE(mOutputFd, 0); if (mWriter != NULL) { ALOGE("File writer is not avaialble"); @@ -837,7 +837,7 @@ sp<MediaSource> StagefrightRecorder::createAudioSource() { } OMXClient client; - CHECK_EQ(client.connect(), OK); + CHECK_EQ(client.connect(), (status_t)OK); sp<MediaSource> audioEncoder = OMXCodec::Create(client.interface(), encMeta, @@ -850,9 +850,9 @@ sp<MediaSource> StagefrightRecorder::createAudioSource() { status_t StagefrightRecorder::startAACRecording() { // FIXME: // Add support for OUTPUT_FORMAT_AAC_ADIF - CHECK(mOutputFormat == OUTPUT_FORMAT_AAC_ADTS); + CHECK_EQ(mOutputFormat, OUTPUT_FORMAT_AAC_ADTS); - CHECK(mAudioEncoder == AUDIO_ENCODER_AAC); + CHECK_EQ(mAudioEncoder, AUDIO_ENCODER_AAC); CHECK(mAudioSource != AUDIO_SOURCE_CNT); mWriter = new 
AACWriter(mOutputFd); @@ -1291,6 +1291,12 @@ status_t StagefrightRecorder::setupCameraSource( videoSize.width = mVideoWidth; videoSize.height = mVideoHeight; if (mCaptureTimeLapse) { + if (mTimeBetweenTimeLapseFrameCaptureUs < 0) { + ALOGE("Invalid mTimeBetweenTimeLapseFrameCaptureUs value: %lld", + mTimeBetweenTimeLapseFrameCaptureUs); + return BAD_VALUE; + } + mCameraSourceTimeLapse = CameraSourceTimeLapse::CreateFromCamera( mCamera, mCameraProxy, mCameraId, videoSize, mFrameRate, mPreviewSurface, @@ -1386,7 +1392,7 @@ status_t StagefrightRecorder::setupVideoEncoder( } OMXClient client; - CHECK_EQ(client.connect(), OK); + CHECK_EQ(client.connect(), (status_t)OK); uint32_t encoder_flags = 0; if (mIsMetaDataStoredInVideoBuffers) { diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp index a00aaa5..526120a 100644 --- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp +++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp @@ -38,7 +38,6 @@ #include <media/stagefright/MediaDefs.h> #include <media/stagefright/MediaErrors.h> #include <media/stagefright/MetaData.h> -#include <surfaceflinger/Surface.h> #include <gui/ISurfaceTexture.h> #include "avc_utils.h" @@ -337,6 +336,7 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) { CHECK_EQ(mAudioSink->open( sampleRate, numChannels, + CHANNEL_MASK_USE_CHANNEL_ORDER, AUDIO_FORMAT_PCM_16_BIT, 8 /* bufferCount */), (status_t)OK); @@ -387,10 +387,10 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) { audio ? "audio" : "video"); mRenderer->queueEOS(audio, UNKNOWN_ERROR); - } else { - CHECK_EQ((int)what, (int)ACodec::kWhatDrainThisBuffer); - + } else if (what == ACodec::kWhatDrainThisBuffer) { renderBuffer(audio, codecRequest); + } else { + ALOGV("Unhandled codec notification %d.", what); } break; @@ -480,7 +480,7 @@ void NuPlayer::onMessageReceived(const sp<AMessage> &msg) { // completed. 
ALOGV("postponing reset mFlushingAudio=%d, mFlushingVideo=%d", - mFlushingAudio, mFlushingVideo); + mFlushingAudio, mFlushingVideo); mResetPostponed = true; break; @@ -690,7 +690,7 @@ status_t NuPlayer::feedDecoderInputData(bool audio, const sp<AMessage> &msg) { bool timeChange = (type & ATSParser::DISCONTINUITY_TIME) != 0; ALOGI("%s discontinuity (formatChange=%d, time=%d)", - audio ? "audio" : "video", formatChange, timeChange); + audio ? "audio" : "video", formatChange, timeChange); if (audio) { mSkipRenderingAudioUntilMediaTimeUs = -1; @@ -768,7 +768,7 @@ status_t NuPlayer::feedDecoderInputData(bool audio, const sp<AMessage> &msg) { mediaTimeUs / 1E6); #endif - reply->setObject("buffer", accessUnit); + reply->setBuffer("buffer", accessUnit); reply->post(); return OK; @@ -793,10 +793,8 @@ void NuPlayer::renderBuffer(bool audio, const sp<AMessage> &msg) { return; } - sp<RefBase> obj; - CHECK(msg->findObject("buffer", &obj)); - - sp<ABuffer> buffer = static_cast<ABuffer *>(obj.get()); + sp<ABuffer> buffer; + CHECK(msg->findBuffer("buffer", &buffer)); int64_t &skipUntilMediaTimeUs = audio diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h index ffc710e..6be14be 100644 --- a/media/libmediaplayerservice/nuplayer/NuPlayer.h +++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h @@ -21,8 +21,6 @@ #include <media/MediaPlayerInterface.h> #include <media/stagefright/foundation/AHandler.h> #include <media/stagefright/NativeWindowWrapper.h> -#include <gui/SurfaceTextureClient.h> -#include <surfaceflinger/Surface.h> namespace android { diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp index 56c2773..460fc98 100644 --- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp +++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp @@ -29,8 +29,6 @@ #include <media/stagefright/MediaDefs.h> #include <media/stagefright/MetaData.h> 
#include <media/stagefright/Utils.h> -#include <surfaceflinger/Surface.h> -#include <gui/ISurfaceTexture.h> namespace android { @@ -214,8 +212,6 @@ sp<AMessage> NuPlayer::Decoder::makeFormat(const sp<MetaData> &meta) { buffer->meta()->setInt32("csd", true); mCSD.push(buffer); - - msg->setObject("csd", buffer); } else if (meta->findData(kKeyESDS, &type, &data, &size)) { ESDS esds((const char *)data, size); CHECK_EQ(esds.InitCheck(), (status_t)OK); @@ -242,9 +238,8 @@ void NuPlayer::Decoder::onFillThisBuffer(const sp<AMessage> &msg) { CHECK(msg->findMessage("reply", &reply)); #if 0 - sp<RefBase> obj; - CHECK(msg->findObject("buffer", &obj)); - sp<ABuffer> outBuffer = static_cast<ABuffer *>(obj.get()); + sp<ABuffer> outBuffer; + CHECK(msg->findBuffer("buffer", &outBuffer)); #else sp<ABuffer> outBuffer; #endif @@ -253,7 +248,7 @@ void NuPlayer::Decoder::onFillThisBuffer(const sp<AMessage> &msg) { outBuffer = mCSD.editItemAt(mCSDIndex++); outBuffer->meta()->setInt64("timeUs", 0); - reply->setObject("buffer", outBuffer); + reply->setBuffer("buffer", outBuffer); reply->post(); return; } diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp index 074cb4f..5738ecb 100644 --- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp +++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp @@ -60,7 +60,7 @@ void NuPlayer::Renderer::queueBuffer( const sp<AMessage> ¬ifyConsumed) { sp<AMessage> msg = new AMessage(kWhatQueueBuffer, id()); msg->setInt32("audio", static_cast<int32_t>(audio)); - msg->setObject("buffer", buffer); + msg->setBuffer("buffer", buffer); msg->setMessage("notifyConsumed", notifyConsumed); msg->post(); } @@ -376,7 +376,7 @@ void NuPlayer::Renderer::onDrainVideoQueue() { bool tooLate = (mVideoLateByUs > 40000); if (tooLate) { - ALOGV("video late by %lld us (%.2f secs)", lateByUs, lateByUs / 1E6); + ALOGV("video late by %lld us (%.2f secs)", mVideoLateByUs, mVideoLateByUs / 
1E6); } else { ALOGV("rendering video at media time %.2f secs", mediaTimeUs / 1E6); } @@ -411,9 +411,8 @@ void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) { return; } - sp<RefBase> obj; - CHECK(msg->findObject("buffer", &obj)); - sp<ABuffer> buffer = static_cast<ABuffer *>(obj.get()); + sp<ABuffer> buffer; + CHECK(msg->findBuffer("buffer", &buffer)); sp<AMessage> notifyConsumed; CHECK(msg->findMessage("notifyConsumed", ¬ifyConsumed)); @@ -629,7 +628,7 @@ void NuPlayer::Renderer::onPause() { } ALOGV("now paused audio queue has %d entries, video has %d entries", - mAudioQueue.size(), mVideoQueue.size()); + mAudioQueue.size(), mVideoQueue.size()); mPaused = true; } diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp index 6eb0d07..4c65b65 100644 --- a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp +++ b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp @@ -218,10 +218,8 @@ void NuPlayer::RTSPSource::onMessageReceived(const sp<AMessage> &msg) { CHECK(msg->findSize("trackIndex", &trackIndex)); CHECK_LT(trackIndex, mTracks.size()); - sp<RefBase> obj; - CHECK(msg->findObject("accessUnit", &obj)); - - sp<ABuffer> accessUnit = static_cast<ABuffer *>(obj.get()); + sp<ABuffer> accessUnit; + CHECK(msg->findBuffer("accessUnit", &accessUnit)); int32_t damaged; if (accessUnit->meta()->findInt32("damaged", &damaged) diff --git a/media/libstagefright/AACExtractor.cpp b/media/libstagefright/AACExtractor.cpp index 52b1200..4d1072f 100644 --- a/media/libstagefright/AACExtractor.cpp +++ b/media/libstagefright/AACExtractor.cpp @@ -22,9 +22,10 @@ #include "include/avc_utils.h" #include <media/stagefright/foundation/ABuffer.h> +#include <media/stagefright/foundation/AMessage.h> +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/DataSource.h> #include <media/stagefright/MediaBufferGroup.h> -#include <media/stagefright/MediaDebug.h> #include <media/stagefright/MediaDefs.h> 
#include <media/stagefright/MediaErrors.h> #include <media/stagefright/MediaSource.h> @@ -131,18 +132,28 @@ static size_t getAdtsFrameLength(const sp<DataSource> &source, off64_t offset, s return frameSize; } -AACExtractor::AACExtractor(const sp<DataSource> &source) +AACExtractor::AACExtractor( + const sp<DataSource> &source, const sp<AMessage> &_meta) : mDataSource(source), mInitCheck(NO_INIT), mFrameDurationUs(0) { - String8 mimeType; - float confidence; - if (!SniffAAC(mDataSource, &mimeType, &confidence, NULL)) { - return; + sp<AMessage> meta = _meta; + + if (meta == NULL) { + String8 mimeType; + float confidence; + sp<AMessage> _meta; + + if (!SniffAAC(mDataSource, &mimeType, &confidence, &meta)) { + return; + } } + int64_t offset; + CHECK(meta->findInt64("offset", &offset)); + uint8_t profile, sf_index, channel, header[2]; - if (mDataSource->readAt(2, &header, 2) < 2) { + if (mDataSource->readAt(offset + 2, &header, 2) < 2) { return; } @@ -156,7 +167,6 @@ AACExtractor::AACExtractor(const sp<DataSource> &source) mMeta = MakeAACCodecSpecificData(profile, sf_index, channel); - off64_t offset = 0; off64_t streamSize, numFrames = 0; size_t frameSize = 0; int64_t duration = 0; @@ -245,7 +255,12 @@ AACSource::~AACSource() { status_t AACSource::start(MetaData *params) { CHECK(!mStarted); - mOffset = 0; + if (mOffsetVector.empty()) { + mOffset = 0; + } else { + mOffset = mOffsetVector.itemAt(0); + } + mCurrentTimeUs = 0; mGroup = new MediaBufferGroup; mGroup->add_buffer(new MediaBuffer(kMaxFrameSize)); @@ -318,10 +333,39 @@ status_t AACSource::read( bool SniffAAC( const sp<DataSource> &source, String8 *mimeType, float *confidence, - sp<AMessage> *) { + sp<AMessage> *meta) { + off64_t pos = 0; + + for (;;) { + uint8_t id3header[10]; + if (source->readAt(pos, id3header, sizeof(id3header)) + < (ssize_t)sizeof(id3header)) { + return false; + } + + if (memcmp("ID3", id3header, 3)) { + break; + } + + // Skip the ID3v2 header. 
+ + size_t len = + ((id3header[6] & 0x7f) << 21) + | ((id3header[7] & 0x7f) << 14) + | ((id3header[8] & 0x7f) << 7) + | (id3header[9] & 0x7f); + + len += 10; + + pos += len; + + ALOGV("skipped ID3 tag, new starting offset is %lld (0x%016llx)", + pos, pos); + } + uint8_t header[2]; - if (source->readAt(0, &header, 2) != 2) { + if (source->readAt(pos, &header, 2) != 2) { return false; } @@ -329,6 +373,10 @@ bool SniffAAC( if ((header[0] == 0xff) && ((header[1] & 0xf6) == 0xf0)) { *mimeType = MEDIA_MIMETYPE_AUDIO_AAC_ADTS; *confidence = 0.2; + + *meta = new AMessage; + (*meta)->setInt64("offset", pos); + return true; } diff --git a/media/libstagefright/AACWriter.cpp b/media/libstagefright/AACWriter.cpp index 1673ccd..9cdb463 100644 --- a/media/libstagefright/AACWriter.cpp +++ b/media/libstagefright/AACWriter.cpp @@ -60,7 +60,7 @@ AACWriter::AACWriter(int fd) AACWriter::~AACWriter() { if (mStarted) { - stop(); + reset(); } if (mFd != -1) { @@ -152,7 +152,7 @@ status_t AACWriter::pause() { return OK; } -status_t AACWriter::stop() { +status_t AACWriter::reset() { if (!mStarted) { return OK; } diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp index ca44ea3..09e4e45 100644 --- a/media/libstagefright/ACodec.cpp +++ b/media/libstagefright/ACodec.cpp @@ -26,14 +26,12 @@ #include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/foundation/AMessage.h> +#include <media/stagefright/MediaCodecList.h> #include <media/stagefright/MediaDefs.h> #include <media/stagefright/NativeWindowWrapper.h> #include <media/stagefright/OMXClient.h> #include <media/stagefright/OMXCodec.h> -#include <surfaceflinger/Surface.h> -#include <gui/SurfaceTextureClient.h> - #include <OMX_Component.h> namespace android { @@ -168,15 +166,36 @@ struct ACodec::UninitializedState : public ACodec::BaseState { protected: virtual bool onMessageReceived(const sp<AMessage> &msg); + virtual void stateEntered(); private: void onSetup(const sp<AMessage> &msg); + bool 
onAllocateComponent(const sp<AMessage> &msg); DISALLOW_EVIL_CONSTRUCTORS(UninitializedState); }; //////////////////////////////////////////////////////////////////////////////// +struct ACodec::LoadedState : public ACodec::BaseState { + LoadedState(ACodec *codec); + +protected: + virtual bool onMessageReceived(const sp<AMessage> &msg); + virtual void stateEntered(); + +private: + friend struct ACodec::UninitializedState; + + bool onConfigureComponent(const sp<AMessage> &msg); + void onStart(); + void onShutdown(bool keepComponentAllocated); + + DISALLOW_EVIL_CONSTRUCTORS(LoadedState); +}; + +//////////////////////////////////////////////////////////////////////////////// + struct ACodec::LoadedToIdleState : public ACodec::BaseState { LoadedToIdleState(ACodec *codec); @@ -265,6 +284,8 @@ protected: private: void changeStateIfWeOwnAllBuffers(); + bool mComponentNowIdle; + DISALLOW_EVIL_CONSTRUCTORS(ExecutingToIdleState); }; @@ -308,9 +329,13 @@ private: //////////////////////////////////////////////////////////////////////////////// ACodec::ACodec() - : mNode(NULL), - mSentFormat(false) { + : mQuirks(0), + mNode(NULL), + mSentFormat(false), + mIsEncoder(false), + mShutdownInProgress(false) { mUninitializedState = new UninitializedState(this); + mLoadedState = new LoadedState(this); mLoadedToIdleState = new LoadedToIdleState(this); mIdleToExecutingState = new IdleToExecutingState(this); mExecutingState = new ExecutingState(this); @@ -341,6 +366,22 @@ void ACodec::initiateSetup(const sp<AMessage> &msg) { msg->post(); } +void ACodec::initiateAllocateComponent(const sp<AMessage> &msg) { + msg->setWhat(kWhatAllocateComponent); + msg->setTarget(id()); + msg->post(); +} + +void ACodec::initiateConfigureComponent(const sp<AMessage> &msg) { + msg->setWhat(kWhatConfigureComponent); + msg->setTarget(id()); + msg->post(); +} + +void ACodec::initiateStart() { + (new AMessage(kWhatStart, id()))->post(); +} + void ACodec::signalFlush() { ALOGV("[%s] signalFlush", 
mComponentName.c_str()); (new AMessage(kWhatFlush, id()))->post(); @@ -350,8 +391,10 @@ void ACodec::signalResume() { (new AMessage(kWhatResume, id()))->post(); } -void ACodec::initiateShutdown() { - (new AMessage(kWhatShutdown, id()))->post(); +void ACodec::initiateShutdown(bool keepComponentAllocated) { + sp<AMessage> msg = new AMessage(kWhatShutdown, id()); + msg->setInt32("keepComponentAllocated", keepComponentAllocated); + msg->post(); } status_t ACodec::allocateBuffersOnPort(OMX_U32 portIndex) { @@ -360,62 +403,71 @@ status_t ACodec::allocateBuffersOnPort(OMX_U32 portIndex) { CHECK(mDealer[portIndex] == NULL); CHECK(mBuffers[portIndex].isEmpty()); + status_t err; if (mNativeWindow != NULL && portIndex == kPortIndexOutput) { - return allocateOutputBuffersFromNativeWindow(); - } - - OMX_PARAM_PORTDEFINITIONTYPE def; - InitOMXParams(&def); - def.nPortIndex = portIndex; + err = allocateOutputBuffersFromNativeWindow(); + } else { + OMX_PARAM_PORTDEFINITIONTYPE def; + InitOMXParams(&def); + def.nPortIndex = portIndex; - status_t err = mOMX->getParameter( - mNode, OMX_IndexParamPortDefinition, &def, sizeof(def)); + err = mOMX->getParameter( + mNode, OMX_IndexParamPortDefinition, &def, sizeof(def)); - if (err != OK) { - return err; - } + if (err == OK) { + ALOGV("[%s] Allocating %lu buffers of size %lu on %s port", + mComponentName.c_str(), + def.nBufferCountActual, def.nBufferSize, + portIndex == kPortIndexInput ? "input" : "output"); - ALOGV("[%s] Allocating %lu buffers of size %lu on %s port", - mComponentName.c_str(), - def.nBufferCountActual, def.nBufferSize, - portIndex == kPortIndexInput ? 
"input" : "output"); + size_t totalSize = def.nBufferCountActual * def.nBufferSize; + mDealer[portIndex] = new MemoryDealer(totalSize, "ACodec"); - size_t totalSize = def.nBufferCountActual * def.nBufferSize; - mDealer[portIndex] = new MemoryDealer(totalSize, "OMXCodec"); + for (OMX_U32 i = 0; i < def.nBufferCountActual; ++i) { + sp<IMemory> mem = mDealer[portIndex]->allocate(def.nBufferSize); + CHECK(mem.get() != NULL); - for (OMX_U32 i = 0; i < def.nBufferCountActual; ++i) { - sp<IMemory> mem = mDealer[portIndex]->allocate(def.nBufferSize); - CHECK(mem.get() != NULL); + IOMX::buffer_id buffer; - IOMX::buffer_id buffer; + uint32_t requiresAllocateBufferBit = + (portIndex == kPortIndexInput) + ? OMXCodec::kRequiresAllocateBufferOnInputPorts + : OMXCodec::kRequiresAllocateBufferOnOutputPorts; - if (!strcasecmp( - mComponentName.c_str(), "OMX.TI.DUCATI1.VIDEO.DECODER")) { - if (portIndex == kPortIndexInput && i == 0) { - // Only log this warning once per allocation round. + if (mQuirks & requiresAllocateBufferBit) { + err = mOMX->allocateBufferWithBackup( + mNode, portIndex, mem, &buffer); + } else { + err = mOMX->useBuffer(mNode, portIndex, mem, &buffer); + } - ALOGW("OMX.TI.DUCATI1.VIDEO.DECODER requires the use of " - "OMX_AllocateBuffer instead of the preferred " - "OMX_UseBuffer. 
Vendor must fix this."); + BufferInfo info; + info.mBufferID = buffer; + info.mStatus = BufferInfo::OWNED_BY_US; + info.mData = new ABuffer(mem->pointer(), def.nBufferSize); + mBuffers[portIndex].push(info); } - - err = mOMX->allocateBufferWithBackup( - mNode, portIndex, mem, &buffer); - } else { - err = mOMX->useBuffer(mNode, portIndex, mem, &buffer); } + } - if (err != OK) { - return err; - } + if (err != OK) { + return err; + } - BufferInfo info; - info.mBufferID = buffer; - info.mStatus = BufferInfo::OWNED_BY_US; - info.mData = new ABuffer(mem->pointer(), def.nBufferSize); - mBuffers[portIndex].push(info); + sp<AMessage> notify = mNotify->dup(); + notify->setInt32("what", ACodec::kWhatBuffersAllocated); + + notify->setInt32("portIndex", portIndex); + for (size_t i = 0; i < mBuffers[portIndex].size(); ++i) { + AString name = StringPrintf("buffer-id_%d", i); + notify->setPointer(name.c_str(), mBuffers[portIndex][i].mBufferID); + + name = StringPrintf("data_%d", i); + notify->setBuffer(name.c_str(), mBuffers[portIndex][i].mData); } + notify->post(); + return OK; } @@ -671,7 +723,7 @@ ACodec::BufferInfo *ACodec::findBufferByID( return NULL; } -void ACodec::setComponentRole( +status_t ACodec::setComponentRole( bool isEncoder, const char *mime) { struct MimeToRole { const char *mime; @@ -700,6 +752,8 @@ void ACodec::setComponentRole( "video_decoder.mpeg4", "video_encoder.mpeg4" }, { MEDIA_MIMETYPE_VIDEO_H263, "video_decoder.h263", "video_encoder.h263" }, + { MEDIA_MIMETYPE_VIDEO_VPX, + "video_decoder.vpx", "video_encoder.vpx" }, }; static const size_t kNumMimeToRole = @@ -713,7 +767,7 @@ void ACodec::setComponentRole( } if (i == kNumMimeToRole) { - return; + return ERROR_UNSUPPORTED; } const char *role = @@ -736,50 +790,83 @@ void ACodec::setComponentRole( if (err != OK) { ALOGW("[%s] Failed to set standard component role '%s'.", mComponentName.c_str(), role); + + return err; } } + + return OK; } -void ACodec::configureCodec( +status_t ACodec::configureCodec( const 
char *mime, const sp<AMessage> &msg) { - setComponentRole(false /* isEncoder */, mime); + int32_t encoder; + if (!msg->findInt32("encoder", &encoder)) { + encoder = false; + } - if (!strncasecmp(mime, "video/", 6)) { - int32_t width, height; - CHECK(msg->findInt32("width", &width)); - CHECK(msg->findInt32("height", &height)); + mIsEncoder = encoder; - CHECK_EQ(setupVideoDecoder(mime, width, height), - (status_t)OK); + status_t err = setComponentRole(encoder /* isEncoder */, mime); + + if (err != OK) { + return err; + } + + int32_t bitRate = 0; + if (encoder && !msg->findInt32("bitrate", &bitRate)) { + return INVALID_OPERATION; + } + + if (!strncasecmp(mime, "video/", 6)) { + if (encoder) { + err = setupVideoEncoder(mime, msg); + } else { + int32_t width, height; + if (!msg->findInt32("width", &width) + || !msg->findInt32("height", &height)) { + err = INVALID_OPERATION; + } else { + err = setupVideoDecoder(mime, width, height); + } + } } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) { int32_t numChannels, sampleRate; - CHECK(msg->findInt32("channel-count", &numChannels)); - CHECK(msg->findInt32("sample-rate", &sampleRate)); - - CHECK_EQ(setupAACDecoder(numChannels, sampleRate), (status_t)OK); + if (!msg->findInt32("channel-count", &numChannels) + || !msg->findInt32("sample-rate", &sampleRate)) { + err = INVALID_OPERATION; + } else { + err = setupAACCodec(encoder, numChannels, sampleRate, bitRate); + } } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_NB)) { - CHECK_EQ(setupAMRDecoder(false /* isWAMR */), (status_t)OK); + err = setupAMRCodec(encoder, false /* isWAMR */, bitRate); } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_WB)) { - CHECK_EQ(setupAMRDecoder(true /* isWAMR */), (status_t)OK); + err = setupAMRCodec(encoder, true /* isWAMR */, bitRate); } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_G711_ALAW) || !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_G711_MLAW)) { // These are PCM-like formats with a fixed sample rate but // a variable number of 
channels. int32_t numChannels; - CHECK(msg->findInt32("channel-count", &numChannels)); + if (!msg->findInt32("channel-count", &numChannels)) { + err = INVALID_OPERATION; + } else { + err = setupG711Codec(encoder, numChannels); + } + } - CHECK_EQ(setupG711Decoder(numChannels), (status_t)OK); + if (err != OK) { + return err; } int32_t maxInputSize; if (msg->findInt32("max-input-size", &maxInputSize)) { - CHECK_EQ(setMinBufferSize(kPortIndexInput, (size_t)maxInputSize), - (status_t)OK); + err = setMinBufferSize(kPortIndexInput, (size_t)maxInputSize); } else if (!strcmp("OMX.Nvidia.aac.decoder", mComponentName.c_str())) { - CHECK_EQ(setMinBufferSize(kPortIndexInput, 8192), // XXX - (status_t)OK); + err = setMinBufferSize(kPortIndexInput, 8192); // XXX } + + return err; } status_t ACodec::setMinBufferSize(OMX_U32 portIndex, size_t size) { @@ -819,12 +906,113 @@ status_t ACodec::setMinBufferSize(OMX_U32 portIndex, size_t size) { return OK; } -status_t ACodec::setupAACDecoder(int32_t numChannels, int32_t sampleRate) { +status_t ACodec::selectAudioPortFormat( + OMX_U32 portIndex, OMX_AUDIO_CODINGTYPE desiredFormat) { + OMX_AUDIO_PARAM_PORTFORMATTYPE format; + InitOMXParams(&format); + + format.nPortIndex = portIndex; + for (OMX_U32 index = 0;; ++index) { + format.nIndex = index; + + status_t err = mOMX->getParameter( + mNode, OMX_IndexParamAudioPortFormat, + &format, sizeof(format)); + + if (err != OK) { + return err; + } + + if (format.eEncoding == desiredFormat) { + break; + } + } + + return mOMX->setParameter( + mNode, OMX_IndexParamAudioPortFormat, &format, sizeof(format)); +} + +status_t ACodec::setupAACCodec( + bool encoder, + int32_t numChannels, int32_t sampleRate, int32_t bitRate) { + status_t err = setupRawAudioFormat( + encoder ? 
kPortIndexInput : kPortIndexOutput, + sampleRate, + numChannels); + + if (err != OK) { + return err; + } + + if (encoder) { + err = selectAudioPortFormat(kPortIndexOutput, OMX_AUDIO_CodingAAC); + + if (err != OK) { + return err; + } + + OMX_PARAM_PORTDEFINITIONTYPE def; + InitOMXParams(&def); + def.nPortIndex = kPortIndexOutput; + + err = mOMX->getParameter( + mNode, OMX_IndexParamPortDefinition, &def, sizeof(def)); + + if (err != OK) { + return err; + } + + def.format.audio.bFlagErrorConcealment = OMX_TRUE; + def.format.audio.eEncoding = OMX_AUDIO_CodingAAC; + + err = mOMX->setParameter( + mNode, OMX_IndexParamPortDefinition, &def, sizeof(def)); + + if (err != OK) { + return err; + } + + OMX_AUDIO_PARAM_AACPROFILETYPE profile; + InitOMXParams(&profile); + profile.nPortIndex = kPortIndexOutput; + + err = mOMX->getParameter( + mNode, OMX_IndexParamAudioAac, &profile, sizeof(profile)); + + if (err != OK) { + return err; + } + + profile.nChannels = numChannels; + + profile.eChannelMode = + (numChannels == 1) + ? 
OMX_AUDIO_ChannelModeMono: OMX_AUDIO_ChannelModeStereo; + + profile.nSampleRate = sampleRate; + profile.nBitRate = bitRate; + profile.nAudioBandWidth = 0; + profile.nFrameLength = 0; + profile.nAACtools = OMX_AUDIO_AACToolAll; + profile.nAACERtools = OMX_AUDIO_AACERNone; + profile.eAACProfile = OMX_AUDIO_AACObjectLC; + profile.eAACStreamFormat = OMX_AUDIO_AACStreamFormatMP4FF; + + err = mOMX->setParameter( + mNode, OMX_IndexParamAudioAac, &profile, sizeof(profile)); + + if (err != OK) { + return err; + } + + return err; + } + OMX_AUDIO_PARAM_AACPROFILETYPE profile; InitOMXParams(&profile); profile.nPortIndex = kPortIndexInput; - status_t err = mOMX->getParameter( + err = mOMX->getParameter( mNode, OMX_IndexParamAudioAac, &profile, sizeof(profile)); if (err != OK) { @@ -835,16 +1023,59 @@ status_t ACodec::setupAACDecoder(int32_t numChannels, int32_t sampleRate) { profile.nSampleRate = sampleRate; profile.eAACStreamFormat = OMX_AUDIO_AACStreamFormatMP4ADTS; - err = mOMX->setParameter( + return mOMX->setParameter( mNode, OMX_IndexParamAudioAac, &profile, sizeof(profile)); +} - return err; +static OMX_AUDIO_AMRBANDMODETYPE pickModeFromBitRate( + bool isAMRWB, int32_t bps) { + if (isAMRWB) { + if (bps <= 6600) { + return OMX_AUDIO_AMRBandModeWB0; + } else if (bps <= 8850) { + return OMX_AUDIO_AMRBandModeWB1; + } else if (bps <= 12650) { + return OMX_AUDIO_AMRBandModeWB2; + } else if (bps <= 14250) { + return OMX_AUDIO_AMRBandModeWB3; + } else if (bps <= 15850) { + return OMX_AUDIO_AMRBandModeWB4; + } else if (bps <= 18250) { + return OMX_AUDIO_AMRBandModeWB5; + } else if (bps <= 19850) { + return OMX_AUDIO_AMRBandModeWB6; + } else if (bps <= 23050) { + return OMX_AUDIO_AMRBandModeWB7; + } + + // 23850 bps + return OMX_AUDIO_AMRBandModeWB8; + } else { // AMRNB + if (bps <= 4750) { + return OMX_AUDIO_AMRBandModeNB0; + } else if (bps <= 5150) { + return OMX_AUDIO_AMRBandModeNB1; + } else if (bps <= 5900) { + return OMX_AUDIO_AMRBandModeNB2; + } else if (bps <= 6700) { + 
return OMX_AUDIO_AMRBandModeNB3; + } else if (bps <= 7400) { + return OMX_AUDIO_AMRBandModeNB4; + } else if (bps <= 7950) { + return OMX_AUDIO_AMRBandModeNB5; + } else if (bps <= 10200) { + return OMX_AUDIO_AMRBandModeNB6; + } + + // 12200 bps + return OMX_AUDIO_AMRBandModeNB7; + } } -status_t ACodec::setupAMRDecoder(bool isWAMR) { +status_t ACodec::setupAMRCodec(bool encoder, bool isWAMR, int32_t bitrate) { OMX_AUDIO_PARAM_AMRTYPE def; InitOMXParams(&def); - def.nPortIndex = kPortIndexInput; + def.nPortIndex = encoder ? kPortIndexOutput : kPortIndexInput; status_t err = mOMX->getParameter(mNode, OMX_IndexParamAudioAmr, &def, sizeof(def)); @@ -854,14 +1085,24 @@ status_t ACodec::setupAMRDecoder(bool isWAMR) { } def.eAMRFrameFormat = OMX_AUDIO_AMRFrameFormatFSF; + def.eAMRBandMode = pickModeFromBitRate(isWAMR, bitrate); + + err = mOMX->setParameter( + mNode, OMX_IndexParamAudioAmr, &def, sizeof(def)); - def.eAMRBandMode = - isWAMR ? OMX_AUDIO_AMRBandModeWB0 : OMX_AUDIO_AMRBandModeNB0; + if (err != OK) { + return err; + } - return mOMX->setParameter(mNode, OMX_IndexParamAudioAmr, &def, sizeof(def)); + return setupRawAudioFormat( + encoder ? kPortIndexInput : kPortIndexOutput, + isWAMR ? 
16000 : 8000 /* sampleRate */, + 1 /* numChannels */); } -status_t ACodec::setupG711Decoder(int32_t numChannels) { +status_t ACodec::setupG711Codec(bool encoder, int32_t numChannels) { + CHECK(!encoder); // XXX TODO + return setupRawAudioFormat( kPortIndexInput, 8000 /* sampleRate */, numChannels); } @@ -1001,22 +1242,36 @@ status_t ACodec::setSupportedOutputFormat() { &format, sizeof(format)); } -status_t ACodec::setupVideoDecoder( - const char *mime, int32_t width, int32_t height) { - OMX_VIDEO_CODINGTYPE compressionFormat = OMX_VIDEO_CodingUnused; +static status_t GetVideoCodingTypeFromMime( + const char *mime, OMX_VIDEO_CODINGTYPE *codingType) { if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime)) { - compressionFormat = OMX_VIDEO_CodingAVC; + *codingType = OMX_VIDEO_CodingAVC; } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG4, mime)) { - compressionFormat = OMX_VIDEO_CodingMPEG4; + *codingType = OMX_VIDEO_CodingMPEG4; } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_H263, mime)) { - compressionFormat = OMX_VIDEO_CodingH263; + *codingType = OMX_VIDEO_CodingH263; } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG2, mime)) { - compressionFormat = OMX_VIDEO_CodingMPEG2; + *codingType = OMX_VIDEO_CodingMPEG2; + } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_VPX, mime)) { + *codingType = OMX_VIDEO_CodingVPX; } else { - TRESPASS(); + *codingType = OMX_VIDEO_CodingUnused; + return ERROR_UNSUPPORTED; } - status_t err = setVideoPortFormatType( + return OK; +} + +status_t ACodec::setupVideoDecoder( + const char *mime, int32_t width, int32_t height) { + OMX_VIDEO_CODINGTYPE compressionFormat; + status_t err = GetVideoCodingTypeFromMime(mime, &compressionFormat); + + if (err != OK) { + return err; + } + + err = setVideoPortFormatType( kPortIndexInput, compressionFormat, OMX_COLOR_FormatUnused); if (err != OK) { @@ -1046,6 +1301,489 @@ status_t ACodec::setupVideoDecoder( return OK; } +status_t ACodec::setupVideoEncoder(const char *mime, const sp<AMessage> &msg) { + int32_t tmp; + if 
(!msg->findInt32("color-format", &tmp)) { + return INVALID_OPERATION; + } + + OMX_COLOR_FORMATTYPE colorFormat = + static_cast<OMX_COLOR_FORMATTYPE>(tmp); + + status_t err = setVideoPortFormatType( + kPortIndexInput, OMX_VIDEO_CodingUnused, colorFormat); + + if (err != OK) { + ALOGE("[%s] does not support color format %d", + mComponentName.c_str(), colorFormat); + + return err; + } + + /* Input port configuration */ + + OMX_PARAM_PORTDEFINITIONTYPE def; + InitOMXParams(&def); + + OMX_VIDEO_PORTDEFINITIONTYPE *video_def = &def.format.video; + + def.nPortIndex = kPortIndexInput; + + err = mOMX->getParameter( + mNode, OMX_IndexParamPortDefinition, &def, sizeof(def)); + + if (err != OK) { + return err; + } + + int32_t width, height, bitrate; + if (!msg->findInt32("width", &width) + || !msg->findInt32("height", &height) + || !msg->findInt32("bitrate", &bitrate)) { + return INVALID_OPERATION; + } + + video_def->nFrameWidth = width; + video_def->nFrameHeight = height; + + int32_t stride; + if (!msg->findInt32("stride", &stride)) { + stride = width; + } + + video_def->nStride = stride; + + int32_t sliceHeight; + if (!msg->findInt32("slice-height", &sliceHeight)) { + sliceHeight = height; + } + + video_def->nSliceHeight = sliceHeight; + + def.nBufferSize = (video_def->nStride * video_def->nSliceHeight * 3) / 2; + + float frameRate; + if (!msg->findFloat("frame-rate", &frameRate)) { + int32_t tmp; + if (!msg->findInt32("frame-rate", &tmp)) { + return INVALID_OPERATION; + } + frameRate = (float)tmp; + } + + video_def->xFramerate = (OMX_U32)(frameRate * 65536.0f); + video_def->eCompressionFormat = OMX_VIDEO_CodingUnused; + video_def->eColorFormat = colorFormat; + + err = mOMX->setParameter( + mNode, OMX_IndexParamPortDefinition, &def, sizeof(def)); + + if (err != OK) { + ALOGE("[%s] failed to set input port definition parameters.", + mComponentName.c_str()); + + return err; + } + + /* Output port configuration */ + + OMX_VIDEO_CODINGTYPE compressionFormat; + err = 
GetVideoCodingTypeFromMime(mime, &compressionFormat); + + if (err != OK) { + return err; + } + + err = setVideoPortFormatType( + kPortIndexOutput, compressionFormat, OMX_COLOR_FormatUnused); + + if (err != OK) { + ALOGE("[%s] does not support compression format %d", + mComponentName.c_str(), compressionFormat); + + return err; + } + + def.nPortIndex = kPortIndexOutput; + + err = mOMX->getParameter( + mNode, OMX_IndexParamPortDefinition, &def, sizeof(def)); + + if (err != OK) { + return err; + } + + video_def->nFrameWidth = width; + video_def->nFrameHeight = height; + video_def->xFramerate = 0; + video_def->nBitrate = bitrate; + video_def->eCompressionFormat = compressionFormat; + video_def->eColorFormat = OMX_COLOR_FormatUnused; + + err = mOMX->setParameter( + mNode, OMX_IndexParamPortDefinition, &def, sizeof(def)); + + if (err != OK) { + ALOGE("[%s] failed to set output port definition parameters.", + mComponentName.c_str()); + + return err; + } + + switch (compressionFormat) { + case OMX_VIDEO_CodingMPEG4: + err = setupMPEG4EncoderParameters(msg); + break; + + case OMX_VIDEO_CodingH263: + err = setupH263EncoderParameters(msg); + break; + + case OMX_VIDEO_CodingAVC: + err = setupAVCEncoderParameters(msg); + break; + + default: + break; + } + + ALOGI("setupVideoEncoder succeeded"); + + return err; +} + +static OMX_U32 setPFramesSpacing(int32_t iFramesInterval, int32_t frameRate) { + if (iFramesInterval < 0) { + return 0xFFFFFFFF; + } else if (iFramesInterval == 0) { + return 0; + } + OMX_U32 ret = frameRate * iFramesInterval; + CHECK(ret > 1); + return ret; +} + +status_t ACodec::setupMPEG4EncoderParameters(const sp<AMessage> &msg) { + int32_t bitrate, iFrameInterval; + if (!msg->findInt32("bitrate", &bitrate) + || !msg->findInt32("i-frame-interval", &iFrameInterval)) { + return INVALID_OPERATION; + } + + float frameRate; + if (!msg->findFloat("frame-rate", &frameRate)) { + int32_t tmp; + if (!msg->findInt32("frame-rate", &tmp)) { + return INVALID_OPERATION; + } + 
frameRate = (float)tmp; + } + + OMX_VIDEO_PARAM_MPEG4TYPE mpeg4type; + InitOMXParams(&mpeg4type); + mpeg4type.nPortIndex = kPortIndexOutput; + + status_t err = mOMX->getParameter( + mNode, OMX_IndexParamVideoMpeg4, &mpeg4type, sizeof(mpeg4type)); + + if (err != OK) { + return err; + } + + mpeg4type.nSliceHeaderSpacing = 0; + mpeg4type.bSVH = OMX_FALSE; + mpeg4type.bGov = OMX_FALSE; + + mpeg4type.nAllowedPictureTypes = + OMX_VIDEO_PictureTypeI | OMX_VIDEO_PictureTypeP; + + mpeg4type.nPFrames = setPFramesSpacing(iFrameInterval, frameRate); + if (mpeg4type.nPFrames == 0) { + mpeg4type.nAllowedPictureTypes = OMX_VIDEO_PictureTypeI; + } + mpeg4type.nBFrames = 0; + mpeg4type.nIDCVLCThreshold = 0; + mpeg4type.bACPred = OMX_TRUE; + mpeg4type.nMaxPacketSize = 256; + mpeg4type.nTimeIncRes = 1000; + mpeg4type.nHeaderExtension = 0; + mpeg4type.bReversibleVLC = OMX_FALSE; + + int32_t profile; + if (msg->findInt32("profile", &profile)) { + int32_t level; + if (!msg->findInt32("level", &level)) { + return INVALID_OPERATION; + } + + err = verifySupportForProfileAndLevel(profile, level); + + if (err != OK) { + return err; + } + + mpeg4type.eProfile = static_cast<OMX_VIDEO_MPEG4PROFILETYPE>(profile); + mpeg4type.eLevel = static_cast<OMX_VIDEO_MPEG4LEVELTYPE>(level); + } + + err = mOMX->setParameter( + mNode, OMX_IndexParamVideoMpeg4, &mpeg4type, sizeof(mpeg4type)); + + if (err != OK) { + return err; + } + + err = configureBitrate(bitrate); + + if (err != OK) { + return err; + } + + return setupErrorCorrectionParameters(); +} + +status_t ACodec::setupH263EncoderParameters(const sp<AMessage> &msg) { + int32_t bitrate, iFrameInterval; + if (!msg->findInt32("bitrate", &bitrate) + || !msg->findInt32("i-frame-interval", &iFrameInterval)) { + return INVALID_OPERATION; + } + + float frameRate; + if (!msg->findFloat("frame-rate", &frameRate)) { + int32_t tmp; + if (!msg->findInt32("frame-rate", &tmp)) { + return INVALID_OPERATION; + } + frameRate = (float)tmp; + } + + 
OMX_VIDEO_PARAM_H263TYPE h263type; + InitOMXParams(&h263type); + h263type.nPortIndex = kPortIndexOutput; + + status_t err = mOMX->getParameter( + mNode, OMX_IndexParamVideoH263, &h263type, sizeof(h263type)); + + if (err != OK) { + return err; + } + + h263type.nAllowedPictureTypes = + OMX_VIDEO_PictureTypeI | OMX_VIDEO_PictureTypeP; + + h263type.nPFrames = setPFramesSpacing(iFrameInterval, frameRate); + if (h263type.nPFrames == 0) { + h263type.nAllowedPictureTypes = OMX_VIDEO_PictureTypeI; + } + h263type.nBFrames = 0; + + int32_t profile; + if (msg->findInt32("profile", &profile)) { + int32_t level; + if (!msg->findInt32("level", &level)) { + return INVALID_OPERATION; + } + + err = verifySupportForProfileAndLevel(profile, level); + + if (err != OK) { + return err; + } + + h263type.eProfile = static_cast<OMX_VIDEO_H263PROFILETYPE>(profile); + h263type.eLevel = static_cast<OMX_VIDEO_H263LEVELTYPE>(level); + } + + h263type.bPLUSPTYPEAllowed = OMX_FALSE; + h263type.bForceRoundingTypeToZero = OMX_FALSE; + h263type.nPictureHeaderRepetition = 0; + h263type.nGOBHeaderInterval = 0; + + err = mOMX->setParameter( + mNode, OMX_IndexParamVideoH263, &h263type, sizeof(h263type)); + + if (err != OK) { + return err; + } + + err = configureBitrate(bitrate); + + if (err != OK) { + return err; + } + + return setupErrorCorrectionParameters(); +} + +status_t ACodec::setupAVCEncoderParameters(const sp<AMessage> &msg) { + int32_t bitrate, iFrameInterval; + if (!msg->findInt32("bitrate", &bitrate) + || !msg->findInt32("i-frame-interval", &iFrameInterval)) { + return INVALID_OPERATION; + } + + float frameRate; + if (!msg->findFloat("frame-rate", &frameRate)) { + int32_t tmp; + if (!msg->findInt32("frame-rate", &tmp)) { + return INVALID_OPERATION; + } + frameRate = (float)tmp; + } + + OMX_VIDEO_PARAM_AVCTYPE h264type; + InitOMXParams(&h264type); + h264type.nPortIndex = kPortIndexOutput; + + status_t err = mOMX->getParameter( + mNode, OMX_IndexParamVideoAvc, &h264type, sizeof(h264type)); + + 
if (err != OK) { + return err; + } + + h264type.nAllowedPictureTypes = + OMX_VIDEO_PictureTypeI | OMX_VIDEO_PictureTypeP; + + int32_t profile; + if (msg->findInt32("profile", &profile)) { + int32_t level; + if (!msg->findInt32("level", &level)) { + return INVALID_OPERATION; + } + + err = verifySupportForProfileAndLevel(profile, level); + + if (err != OK) { + return err; + } + + h264type.eProfile = static_cast<OMX_VIDEO_AVCPROFILETYPE>(profile); + h264type.eLevel = static_cast<OMX_VIDEO_AVCLEVELTYPE>(level); + } + + // XXX + if (!strncmp(mComponentName.c_str(), "OMX.TI.DUCATI1", 14)) { + h264type.eProfile = OMX_VIDEO_AVCProfileBaseline; + } + + if (h264type.eProfile == OMX_VIDEO_AVCProfileBaseline) { + h264type.nSliceHeaderSpacing = 0; + h264type.bUseHadamard = OMX_TRUE; + h264type.nRefFrames = 1; + h264type.nBFrames = 0; + h264type.nPFrames = setPFramesSpacing(iFrameInterval, frameRate); + if (h264type.nPFrames == 0) { + h264type.nAllowedPictureTypes = OMX_VIDEO_PictureTypeI; + } + h264type.nRefIdx10ActiveMinus1 = 0; + h264type.nRefIdx11ActiveMinus1 = 0; + h264type.bEntropyCodingCABAC = OMX_FALSE; + h264type.bWeightedPPrediction = OMX_FALSE; + h264type.bconstIpred = OMX_FALSE; + h264type.bDirect8x8Inference = OMX_FALSE; + h264type.bDirectSpatialTemporal = OMX_FALSE; + h264type.nCabacInitIdc = 0; + } + + if (h264type.nBFrames != 0) { + h264type.nAllowedPictureTypes |= OMX_VIDEO_PictureTypeB; + } + + h264type.bEnableUEP = OMX_FALSE; + h264type.bEnableFMO = OMX_FALSE; + h264type.bEnableASO = OMX_FALSE; + h264type.bEnableRS = OMX_FALSE; + h264type.bFrameMBsOnly = OMX_TRUE; + h264type.bMBAFF = OMX_FALSE; + h264type.eLoopFilterMode = OMX_VIDEO_AVCLoopFilterEnable; + + if (!strcasecmp("OMX.Nvidia.h264.encoder", mComponentName.c_str())) { + h264type.eLevel = OMX_VIDEO_AVCLevelMax; + } + + err = mOMX->setParameter( + mNode, OMX_IndexParamVideoAvc, &h264type, sizeof(h264type)); + + if (err != OK) { + return err; + } + + return configureBitrate(bitrate); +} + +status_t 
ACodec::verifySupportForProfileAndLevel( + int32_t profile, int32_t level) { + OMX_VIDEO_PARAM_PROFILELEVELTYPE params; + InitOMXParams(¶ms); + params.nPortIndex = kPortIndexOutput; + + for (params.nProfileIndex = 0;; ++params.nProfileIndex) { + status_t err = mOMX->getParameter( + mNode, + OMX_IndexParamVideoProfileLevelQuerySupported, + ¶ms, + sizeof(params)); + + if (err != OK) { + return err; + } + + int32_t supportedProfile = static_cast<int32_t>(params.eProfile); + int32_t supportedLevel = static_cast<int32_t>(params.eLevel); + + if (profile == supportedProfile && level <= supportedLevel) { + return OK; + } + } +} + +status_t ACodec::configureBitrate(int32_t bitrate) { + OMX_VIDEO_PARAM_BITRATETYPE bitrateType; + InitOMXParams(&bitrateType); + bitrateType.nPortIndex = kPortIndexOutput; + + status_t err = mOMX->getParameter( + mNode, OMX_IndexParamVideoBitrate, + &bitrateType, sizeof(bitrateType)); + + if (err != OK) { + return err; + } + + bitrateType.eControlRate = OMX_Video_ControlRateVariable; + bitrateType.nTargetBitrate = bitrate; + + return mOMX->setParameter( + mNode, OMX_IndexParamVideoBitrate, + &bitrateType, sizeof(bitrateType)); +} + +status_t ACodec::setupErrorCorrectionParameters() { + OMX_VIDEO_PARAM_ERRORCORRECTIONTYPE errorCorrectionType; + InitOMXParams(&errorCorrectionType); + errorCorrectionType.nPortIndex = kPortIndexOutput; + + status_t err = mOMX->getParameter( + mNode, OMX_IndexParamVideoErrorCorrection, + &errorCorrectionType, sizeof(errorCorrectionType)); + + if (err != OK) { + return OK; // Optional feature. 
Ignore this failure + } + + errorCorrectionType.bEnableHEC = OMX_FALSE; + errorCorrectionType.bEnableResync = OMX_TRUE; + errorCorrectionType.nResynchMarkerSpacing = 256; + errorCorrectionType.bEnableDataPartitioning = OMX_FALSE; + errorCorrectionType.bEnableRVLC = OMX_FALSE; + + return mOMX->setParameter( + mNode, OMX_IndexParamVideoErrorCorrection, + &errorCorrectionType, sizeof(errorCorrectionType)); +} + status_t ACodec::setVideoFormatOnPort( OMX_U32 portIndex, int32_t width, int32_t height, OMX_VIDEO_CODINGTYPE compressionFormat) { @@ -1166,6 +1904,9 @@ void ACodec::sendFormatChange() { notify->setString("mime", MEDIA_MIMETYPE_VIDEO_RAW); notify->setInt32("width", videoDef->nFrameWidth); notify->setInt32("height", videoDef->nFrameHeight); + notify->setInt32("stride", videoDef->nStride); + notify->setInt32("slice-height", videoDef->nSliceHeight); + notify->setInt32("color-format", videoDef->eColorFormat); OMX_CONFIG_RECTTYPE rect; InitOMXParams(&rect); @@ -1241,10 +1982,11 @@ void ACodec::sendFormatChange() { mSentFormat = true; } -void ACodec::signalError(OMX_ERRORTYPE error) { +void ACodec::signalError(OMX_ERRORTYPE error, status_t internalError) { sp<AMessage> notify = mNotify->dup(); notify->setInt32("what", ACodec::kWhatError); notify->setInt32("omx-error", error); + notify->setInt32("err", internalError); notify->post(); } @@ -1417,7 +2159,7 @@ void ACodec::BaseState::postFillThisBuffer(BufferInfo *info) { notify->setPointer("buffer-id", info->mBufferID); info->mData->meta()->clear(); - notify->setObject("buffer", info->mData); + notify->setBuffer("buffer", info->mData); sp<AMessage> reply = new AMessage(kWhatInputBufferFilled, mCodec->id()); reply->setPointer("buffer-id", info->mBufferID); @@ -1433,18 +2175,26 @@ void ACodec::BaseState::onInputBufferFilled(const sp<AMessage> &msg) { IOMX::buffer_id bufferID; CHECK(msg->findPointer("buffer-id", &bufferID)); - sp<RefBase> obj; + sp<ABuffer> buffer; int32_t err = OK; - if (!msg->findObject("buffer", &obj)) 
{ + bool eos = false; + + if (!msg->findBuffer("buffer", &buffer)) { CHECK(msg->findInt32("err", &err)); ALOGV("[%s] saw error %d instead of an input buffer", mCodec->mComponentName.c_str(), err); - obj.clear(); + buffer.clear(); + + eos = true; } - sp<ABuffer> buffer = static_cast<ABuffer *>(obj.get()); + int32_t tmp; + if (buffer != NULL && buffer->meta()->findInt32("eos", &tmp) && tmp) { + eos = true; + err = ERROR_END_OF_STREAM; + } BufferInfo *info = mCodec->findBufferByID(kPortIndexInput, bufferID); CHECK_EQ((int)info->mStatus, (int)BufferInfo::OWNED_BY_UPSTREAM); @@ -1456,7 +2206,7 @@ void ACodec::BaseState::onInputBufferFilled(const sp<AMessage> &msg) { switch (mode) { case KEEP_BUFFERS: { - if (buffer == NULL) { + if (eos) { if (!mCodec->mPortEOS[kPortIndexInput]) { mCodec->mPortEOS[kPortIndexInput] = true; mCodec->mInputEOSResult = err; @@ -1467,9 +2217,7 @@ void ACodec::BaseState::onInputBufferFilled(const sp<AMessage> &msg) { case RESUBMIT_BUFFERS: { - if (buffer != NULL) { - CHECK(!mCodec->mPortEOS[kPortIndexInput]); - + if (buffer != NULL && !mCodec->mPortEOS[kPortIndexInput]) { int64_t timeUs; CHECK(buffer->meta()->findInt64("timeUs", &timeUs)); @@ -1480,6 +2228,10 @@ void ACodec::BaseState::onInputBufferFilled(const sp<AMessage> &msg) { flags |= OMX_BUFFERFLAG_CODECCONFIG; } + if (eos) { + flags |= OMX_BUFFERFLAG_EOS; + } + if (buffer != info->mData) { if (0 && !(flags & OMX_BUFFERFLAG_CODECCONFIG)) { ALOGV("[%s] Needs to copy input data.", @@ -1493,6 +2245,9 @@ void ACodec::BaseState::onInputBufferFilled(const sp<AMessage> &msg) { if (flags & OMX_BUFFERFLAG_CODECCONFIG) { ALOGV("[%s] calling emptyBuffer %p w/ codec specific data", mCodec->mComponentName.c_str(), bufferID); + } else if (flags & OMX_BUFFERFLAG_EOS) { + ALOGV("[%s] calling emptyBuffer %p w/ EOS", + mCodec->mComponentName.c_str(), bufferID); } else { ALOGV("[%s] calling emptyBuffer %p w/ time %lld us", mCodec->mComponentName.c_str(), bufferID, timeUs); @@ -1509,7 +2264,15 @@ void 
ACodec::BaseState::onInputBufferFilled(const sp<AMessage> &msg) { info->mStatus = BufferInfo::OWNED_BY_COMPONENT; - getMoreInputDataIfPossible(); + if (!eos) { + getMoreInputDataIfPossible(); + } else { + ALOGV("[%s] Signalled EOS on the input port", + mCodec->mComponentName.c_str()); + + mCodec->mPortEOS[kPortIndexInput] = true; + mCodec->mInputEOSResult = err; + } } else if (!mCodec->mPortEOS[kPortIndexInput]) { if (err != ERROR_END_OF_STREAM) { ALOGV("[%s] Signalling EOS on the input port " @@ -1582,8 +2345,8 @@ bool ACodec::BaseState::onOMXFillBufferDone( int64_t timeUs, void *platformPrivate, void *dataPtr) { - ALOGV("[%s] onOMXFillBufferDone %p time %lld us", - mCodec->mComponentName.c_str(), bufferID, timeUs); + ALOGV("[%s] onOMXFillBufferDone %p time %lld us, flags = 0x%08lx", + mCodec->mComponentName.c_str(), bufferID, timeUs, flags); ssize_t index; BufferInfo *info = @@ -1601,46 +2364,48 @@ bool ACodec::BaseState::onOMXFillBufferDone( case RESUBMIT_BUFFERS: { - if (rangeLength == 0) { - if (!(flags & OMX_BUFFERFLAG_EOS)) { - ALOGV("[%s] calling fillBuffer %p", - mCodec->mComponentName.c_str(), info->mBufferID); + if (rangeLength == 0 && !(flags & OMX_BUFFERFLAG_EOS)) { + ALOGV("[%s] calling fillBuffer %p", + mCodec->mComponentName.c_str(), info->mBufferID); - CHECK_EQ(mCodec->mOMX->fillBuffer( - mCodec->mNode, info->mBufferID), - (status_t)OK); + CHECK_EQ(mCodec->mOMX->fillBuffer( + mCodec->mNode, info->mBufferID), + (status_t)OK); - info->mStatus = BufferInfo::OWNED_BY_COMPONENT; - } - } else { - if (!mCodec->mSentFormat) { - mCodec->sendFormatChange(); - } + info->mStatus = BufferInfo::OWNED_BY_COMPONENT; + break; + } - if (mCodec->mNativeWindow == NULL) { - info->mData->setRange(rangeOffset, rangeLength); - } + if (!mCodec->mIsEncoder && !mCodec->mSentFormat) { + mCodec->sendFormatChange(); + } - info->mData->meta()->setInt64("timeUs", timeUs); + if (mCodec->mNativeWindow == NULL) { + info->mData->setRange(rangeOffset, rangeLength); + } - sp<AMessage> 
notify = mCodec->mNotify->dup(); - notify->setInt32("what", ACodec::kWhatDrainThisBuffer); - notify->setPointer("buffer-id", info->mBufferID); - notify->setObject("buffer", info->mData); + info->mData->meta()->setInt64("timeUs", timeUs); - sp<AMessage> reply = - new AMessage(kWhatOutputBufferDrained, mCodec->id()); + sp<AMessage> notify = mCodec->mNotify->dup(); + notify->setInt32("what", ACodec::kWhatDrainThisBuffer); + notify->setPointer("buffer-id", info->mBufferID); + notify->setBuffer("buffer", info->mData); + notify->setInt32("flags", flags); - reply->setPointer("buffer-id", info->mBufferID); + sp<AMessage> reply = + new AMessage(kWhatOutputBufferDrained, mCodec->id()); - notify->setMessage("reply", reply); + reply->setPointer("buffer-id", info->mBufferID); - notify->post(); + notify->setMessage("reply", reply); - info->mStatus = BufferInfo::OWNED_BY_DOWNSTREAM; - } + notify->post(); + + info->mStatus = BufferInfo::OWNED_BY_DOWNSTREAM; if (flags & OMX_BUFFERFLAG_EOS) { + ALOGV("[%s] saw output EOS", mCodec->mComponentName.c_str()); + sp<AMessage> notify = mCodec->mNotify->dup(); notify->setInt32("what", ACodec::kWhatEOS); notify->setInt32("err", mCodec->mInputEOSResult); @@ -1678,12 +2443,13 @@ void ACodec::BaseState::onOutputBufferDrained(const sp<AMessage> &msg) { && msg->findInt32("render", &render) && render != 0) { // The client wants this buffer to be rendered. 
- if (mCodec->mNativeWindow->queueBuffer( + status_t err; + if ((err = mCodec->mNativeWindow->queueBuffer( mCodec->mNativeWindow.get(), - info->mGraphicBuffer.get()) == OK) { + info->mGraphicBuffer.get())) == OK) { info->mStatus = BufferInfo::OWNED_BY_NATIVE_WINDOW; } else { - mCodec->signalError(); + mCodec->signalError(OMX_ErrorUndefined, err); info->mStatus = BufferInfo::OWNED_BY_US; } } else { @@ -1746,6 +2512,10 @@ ACodec::UninitializedState::UninitializedState(ACodec *codec) : BaseState(codec) { } +void ACodec::UninitializedState::stateEntered() { + ALOGV("Now uninitialized"); +} + bool ACodec::UninitializedState::onMessageReceived(const sp<AMessage> &msg) { bool handled = false; @@ -1758,8 +2528,20 @@ bool ACodec::UninitializedState::onMessageReceived(const sp<AMessage> &msg) { break; } + case ACodec::kWhatAllocateComponent: + { + onAllocateComponent(msg); + handled = true; + break; + } + case ACodec::kWhatShutdown: { + int32_t keepComponentAllocated; + CHECK(msg->findInt32( + "keepComponentAllocated", &keepComponentAllocated)); + CHECK(!keepComponentAllocated); + sp<AMessage> notify = mCodec->mNotify->dup(); notify->setInt32("what", ACodec::kWhatShutdownCompleted); notify->post(); @@ -1787,30 +2569,60 @@ bool ACodec::UninitializedState::onMessageReceived(const sp<AMessage> &msg) { void ACodec::UninitializedState::onSetup( const sp<AMessage> &msg) { + if (onAllocateComponent(msg) + && mCodec->mLoadedState->onConfigureComponent(msg)) { + mCodec->mLoadedState->onStart(); + } +} + +bool ACodec::UninitializedState::onAllocateComponent(const sp<AMessage> &msg) { + ALOGV("onAllocateComponent"); + + CHECK(mCodec->mNode == NULL); + OMXClient client; CHECK_EQ(client.connect(), (status_t)OK); sp<IOMX> omx = client.interface(); + Vector<String8> matchingCodecs; + Vector<uint32_t> matchingCodecQuirks; + AString mime; - CHECK(msg->findString("mime", &mime)); - Vector<String8> matchingCodecs; - OMXCodec::findMatchingCodecs( - mime.c_str(), - false, // createEncoder - 
NULL, // matchComponentName - 0, // flags - &matchingCodecs); + AString componentName; + uint32_t quirks; + if (msg->findString("componentName", &componentName)) { + matchingCodecs.push_back(String8(componentName.c_str())); + + if (!OMXCodec::findCodecQuirks(componentName.c_str(), &quirks)) { + quirks = 0; + } + matchingCodecQuirks.push_back(quirks); + } else { + CHECK(msg->findString("mime", &mime)); + + int32_t encoder; + if (!msg->findInt32("encoder", &encoder)) { + encoder = false; + } + + OMXCodec::findMatchingCodecs( + mime.c_str(), + encoder, // createEncoder + NULL, // matchComponentName + 0, // flags + &matchingCodecs, + &matchingCodecQuirks); + } sp<CodecObserver> observer = new CodecObserver; IOMX::node_id node = NULL; - AString componentName; - for (size_t matchIndex = 0; matchIndex < matchingCodecs.size(); ++matchIndex) { componentName = matchingCodecs.itemAt(matchIndex).string(); + quirks = matchingCodecQuirks.itemAt(matchIndex); pid_t tid = androidGetTid(); int prevPriority = androidGetThreadPriority(tid); @@ -1826,16 +2638,22 @@ void ACodec::UninitializedState::onSetup( } if (node == NULL) { - ALOGE("Unable to instantiate a decoder for type '%s'.", mime.c_str()); + if (!mime.empty()) { + ALOGE("Unable to instantiate a decoder for type '%s'.", + mime.c_str()); + } else { + ALOGE("Unable to instantiate decoder '%s'.", componentName.c_str()); + } mCodec->signalError(OMX_ErrorComponentNotFound); - return; + return false; } sp<AMessage> notify = new AMessage(kWhatOMXMessage, mCodec->id()); observer->setNotificationMessage(notify); mCodec->mComponentName = componentName; + mCodec->mQuirks = quirks; mCodec->mOMX = omx; mCodec->mNode = node; @@ -1844,20 +2662,142 @@ void ACodec::UninitializedState::onSetup( mCodec->mInputEOSResult = OK; - mCodec->configureCodec(mime.c_str(), msg); + { + sp<AMessage> notify = mCodec->mNotify->dup(); + notify->setInt32("what", ACodec::kWhatComponentAllocated); + notify->setString("componentName", 
mCodec->mComponentName.c_str()); + notify->post(); + } + + mCodec->changeState(mCodec->mLoadedState); + + return true; +} + +//////////////////////////////////////////////////////////////////////////////// + +ACodec::LoadedState::LoadedState(ACodec *codec) + : BaseState(codec) { +} + +void ACodec::LoadedState::stateEntered() { + ALOGV("[%s] Now Loaded", mCodec->mComponentName.c_str()); + + if (mCodec->mShutdownInProgress) { + bool keepComponentAllocated = mCodec->mKeepComponentAllocated; + + mCodec->mShutdownInProgress = false; + mCodec->mKeepComponentAllocated = false; + + onShutdown(keepComponentAllocated); + } +} + +void ACodec::LoadedState::onShutdown(bool keepComponentAllocated) { + if (!keepComponentAllocated) { + CHECK_EQ(mCodec->mOMX->freeNode(mCodec->mNode), (status_t)OK); + + mCodec->mNativeWindow.clear(); + mCodec->mNode = NULL; + mCodec->mOMX.clear(); + mCodec->mQuirks = 0; + mCodec->mComponentName.clear(); + + mCodec->changeState(mCodec->mUninitializedState); + } + + sp<AMessage> notify = mCodec->mNotify->dup(); + notify->setInt32("what", ACodec::kWhatShutdownCompleted); + notify->post(); +} + +bool ACodec::LoadedState::onMessageReceived(const sp<AMessage> &msg) { + bool handled = false; + + switch (msg->what()) { + case ACodec::kWhatConfigureComponent: + { + onConfigureComponent(msg); + handled = true; + break; + } + + case ACodec::kWhatStart: + { + onStart(); + handled = true; + break; + } + + case ACodec::kWhatShutdown: + { + int32_t keepComponentAllocated; + CHECK(msg->findInt32( + "keepComponentAllocated", &keepComponentAllocated)); + + onShutdown(keepComponentAllocated); + + handled = true; + break; + } + + case ACodec::kWhatFlush: + { + sp<AMessage> notify = mCodec->mNotify->dup(); + notify->setInt32("what", ACodec::kWhatFlushCompleted); + notify->post(); + + handled = true; + break; + } + + default: + return BaseState::onMessageReceived(msg); + } + + return handled; +} + +bool ACodec::LoadedState::onConfigureComponent( + const sp<AMessage> 
&msg) { + ALOGV("onConfigureComponent"); + + CHECK(mCodec->mNode != NULL); + + AString mime; + CHECK(msg->findString("mime", &mime)); + + status_t err = mCodec->configureCodec(mime.c_str(), msg); + + if (err != OK) { + mCodec->signalError(OMX_ErrorUndefined, err); + return false; + } sp<RefBase> obj; if (msg->findObject("native-window", &obj) - && strncmp("OMX.google.", componentName.c_str(), 11)) { + && strncmp("OMX.google.", mCodec->mComponentName.c_str(), 11)) { sp<NativeWindowWrapper> nativeWindow( static_cast<NativeWindowWrapper *>(obj.get())); CHECK(nativeWindow != NULL); mCodec->mNativeWindow = nativeWindow->getNativeWindow(); } - CHECK_EQ((status_t)OK, mCodec->initNativeWindow()); - CHECK_EQ(omx->sendCommand(node, OMX_CommandStateSet, OMX_StateIdle), + { + sp<AMessage> notify = mCodec->mNotify->dup(); + notify->setInt32("what", ACodec::kWhatComponentConfigured); + notify->post(); + } + + return true; +} + +void ACodec::LoadedState::onStart() { + ALOGV("onStart"); + + CHECK_EQ(mCodec->mOMX->sendCommand( + mCodec->mNode, OMX_CommandStateSet, OMX_StateIdle), (status_t)OK); mCodec->changeState(mCodec->mLoadedToIdleState); @@ -1878,7 +2818,7 @@ void ACodec::LoadedToIdleState::stateEntered() { "(error 0x%08x)", err); - mCodec->signalError(); + mCodec->signalError(OMX_ErrorUndefined, err); } } @@ -2042,6 +2982,13 @@ bool ACodec::ExecutingState::onMessageReceived(const sp<AMessage> &msg) { switch (msg->what()) { case kWhatShutdown: { + int32_t keepComponentAllocated; + CHECK(msg->findInt32( + "keepComponentAllocated", &keepComponentAllocated)); + + mCodec->mShutdownInProgress = true; + mCodec->mKeepComponentAllocated = keepComponentAllocated; + mActive = false; CHECK_EQ(mCodec->mOMX->sendCommand( @@ -2202,7 +3149,7 @@ bool ACodec::OutputPortSettingsChangedState::onOMXEvent( "port reconfiguration (error 0x%08x)", err); - mCodec->signalError(); + mCodec->signalError(OMX_ErrorUndefined, err); // This is technically not correct, since we were unable // to allocate 
output buffers and therefore the output port @@ -2240,7 +3187,8 @@ bool ACodec::OutputPortSettingsChangedState::onOMXEvent( //////////////////////////////////////////////////////////////////////////////// ACodec::ExecutingToIdleState::ExecutingToIdleState(ACodec *codec) - : BaseState(codec) { + : BaseState(codec), + mComponentNowIdle(false) { } bool ACodec::ExecutingToIdleState::onMessageReceived(const sp<AMessage> &msg) { @@ -2274,6 +3222,7 @@ bool ACodec::ExecutingToIdleState::onMessageReceived(const sp<AMessage> &msg) { void ACodec::ExecutingToIdleState::stateEntered() { ALOGV("[%s] Now Executing->Idle", mCodec->mComponentName.c_str()); + mComponentNowIdle = false; mCodec->mSentFormat = false; } @@ -2285,6 +3234,8 @@ bool ACodec::ExecutingToIdleState::onOMXEvent( CHECK_EQ(data1, (OMX_U32)OMX_CommandStateSet); CHECK_EQ(data2, (OMX_U32)OMX_StateIdle); + mComponentNowIdle = true; + changeStateIfWeOwnAllBuffers(); return true; @@ -2303,7 +3254,7 @@ bool ACodec::ExecutingToIdleState::onOMXEvent( } void ACodec::ExecutingToIdleState::changeStateIfWeOwnAllBuffers() { - if (mCodec->allYourBuffersAreBelongToUs()) { + if (mComponentNowIdle && mCodec->allYourBuffersAreBelongToUs()) { CHECK_EQ(mCodec->mOMX->sendCommand( mCodec->mNode, OMX_CommandStateSet, OMX_StateLoaded), (status_t)OK); @@ -2375,20 +3326,7 @@ bool ACodec::IdleToLoadedState::onOMXEvent( CHECK_EQ(data1, (OMX_U32)OMX_CommandStateSet); CHECK_EQ(data2, (OMX_U32)OMX_StateLoaded); - ALOGV("[%s] Now Loaded", mCodec->mComponentName.c_str()); - - CHECK_EQ(mCodec->mOMX->freeNode(mCodec->mNode), (status_t)OK); - - mCodec->mNativeWindow.clear(); - mCodec->mNode = NULL; - mCodec->mOMX.clear(); - mCodec->mComponentName.clear(); - - mCodec->changeState(mCodec->mUninitializedState); - - sp<AMessage> notify = mCodec->mNotify->dup(); - notify->setInt32("what", ACodec::kWhatShutdownCompleted); - notify->post(); + mCodec->changeState(mCodec->mLoadedState); return true; } diff --git a/media/libstagefright/AMRExtractor.cpp 
b/media/libstagefright/AMRExtractor.cpp index 5a28347..03dcbf9 100644 --- a/media/libstagefright/AMRExtractor.cpp +++ b/media/libstagefright/AMRExtractor.cpp @@ -20,9 +20,9 @@ #include "include/AMRExtractor.h" +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/DataSource.h> #include <media/stagefright/MediaBufferGroup.h> -#include <media/stagefright/MediaDebug.h> #include <media/stagefright/MediaDefs.h> #include <media/stagefright/MediaErrors.h> #include <media/stagefright/MediaSource.h> diff --git a/media/libstagefright/AMRWriter.cpp b/media/libstagefright/AMRWriter.cpp index 6c4e307..ca85640 100644 --- a/media/libstagefright/AMRWriter.cpp +++ b/media/libstagefright/AMRWriter.cpp @@ -14,9 +14,9 @@ * limitations under the License. */ +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/AMRWriter.h> #include <media/stagefright/MediaBuffer.h> -#include <media/stagefright/MediaDebug.h> #include <media/stagefright/MediaDefs.h> #include <media/stagefright/MediaErrors.h> #include <media/stagefright/MediaSource.h> @@ -52,7 +52,7 @@ AMRWriter::AMRWriter(int fd) AMRWriter::~AMRWriter() { if (mStarted) { - stop(); + reset(); } if (mFd != -1) { @@ -152,7 +152,7 @@ status_t AMRWriter::pause() { return OK; } -status_t AMRWriter::stop() { +status_t AMRWriter::reset() { if (!mStarted) { return OK; } diff --git a/media/libstagefright/AVIExtractor.cpp b/media/libstagefright/AVIExtractor.cpp index a3187b7..5a6211e 100644 --- a/media/libstagefright/AVIExtractor.cpp +++ b/media/libstagefright/AVIExtractor.cpp @@ -577,6 +577,7 @@ static const char *GetMIMETypeForHandler(uint32_t handler) { case FOURCC('a', 'v', 'c', '1'): case FOURCC('d', 'a', 'v', 'c'): case FOURCC('x', '2', '6', '4'): + case FOURCC('H', '2', '6', '4'): case FOURCC('v', 's', 's', 'h'): return MEDIA_MIMETYPE_VIDEO_AVC; diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk index 690deac..5aea8d0 100644 --- a/media/libstagefright/Android.mk +++ 
b/media/libstagefright/Android.mk @@ -9,6 +9,7 @@ LOCAL_SRC_FILES:= \ AACWriter.cpp \ AMRExtractor.cpp \ AMRWriter.cpp \ + AVIExtractor.cpp \ AudioPlayer.cpp \ AudioSource.cpp \ AwesomePlayer.cpp \ @@ -28,12 +29,14 @@ LOCAL_SRC_FILES:= \ MPEG4Writer.cpp \ MediaBuffer.cpp \ MediaBufferGroup.cpp \ + MediaCodec.cpp \ + MediaCodecList.cpp \ MediaDefs.cpp \ MediaExtractor.cpp \ MediaSource.cpp \ - MediaSourceSplitter.cpp \ MetaData.cpp \ NuCachedSource2.cpp \ + NuMediaExtractor.cpp \ OMXClient.cpp \ OMXCodec.cpp \ OggExtractor.cpp \ @@ -55,30 +58,38 @@ LOCAL_SRC_FILES:= \ LOCAL_C_INCLUDES:= \ $(JNI_H_INCLUDE) \ $(TOP)/frameworks/base/include/media/stagefright/openmax \ + $(TOP)/frameworks/base/include/media/stagefright/timedtext \ + $(TOP)/external/expat/lib \ $(TOP)/external/flac/include \ $(TOP)/external/tremolo \ $(TOP)/external/openssl/include \ LOCAL_SHARED_LIBRARIES := \ - libbinder \ - libmedia \ - libutils \ - libcutils \ - libui \ - libsonivox \ - libvorbisidec \ - libstagefright_yuv \ + libbinder \ libcamera_client \ - libdrmframework \ - libcrypto \ - libssl \ - libgui \ + libchromium_net \ + libcrypto \ + libcutils \ + libdl \ + libdrmframework \ + libexpat \ + libgui \ + libicui18n \ + libicuuc \ + liblog \ + libmedia \ + libsonivox \ + libssl \ + libstagefright_omx \ + libstagefright_yuv \ + libui \ + libutils \ + libvorbisidec \ + libz \ LOCAL_STATIC_LIBRARIES := \ libstagefright_color_conversion \ libstagefright_aacenc \ - libstagefright_amrnbenc \ - libstagefright_amrwbenc \ libstagefright_avcenc \ libstagefright_m4vh263enc \ libstagefright_matroska \ @@ -88,59 +99,15 @@ LOCAL_STATIC_LIBRARIES := \ libstagefright_httplive \ libstagefright_id3 \ libFLAC \ + libstagefright_chromium_http \ -################################################################################ - -# The following was shamelessly copied from external/webkit/Android.mk and -# currently must follow the same logic to determine how webkit was built and -# if it's safe to link against 
libchromium.net - -# V8 also requires an ARMv7 CPU, and since we must use jsc, we cannot -# use the Chrome http stack either. -ifneq ($(strip $(ARCH_ARM_HAVE_ARMV7A)),true) - USE_ALT_HTTP := true -endif - -# See if the user has specified a stack they want to use -HTTP_STACK = $(HTTP) -# We default to the Chrome HTTP stack. -DEFAULT_HTTP = chrome -ALT_HTTP = android - -ifneq ($(HTTP_STACK),chrome) - ifneq ($(HTTP_STACK),android) - # No HTTP stack is specified, pickup the one we want as default. - ifeq ($(USE_ALT_HTTP),true) - HTTP_STACK = $(ALT_HTTP) - else - HTTP_STACK = $(DEFAULT_HTTP) - endif - endif -endif - -ifeq ($(HTTP_STACK),chrome) - -LOCAL_SHARED_LIBRARIES += \ - liblog \ - libicuuc \ - libicui18n \ - libz \ - libdl \ - -LOCAL_STATIC_LIBRARIES += \ - libstagefright_chromium_http - -LOCAL_SHARED_LIBRARIES += libstlport libchromium_net +LOCAL_SHARED_LIBRARIES += libstlport include external/stlport/libstlport.mk +# TODO: Chromium is always available, so this flag can be removed. LOCAL_CPPFLAGS += -DCHROMIUM_AVAILABLE=1 -endif # ifeq ($(HTTP_STACK),chrome) - -################################################################################ - LOCAL_SHARED_LIBRARIES += \ - libstagefright_amrnb_common \ libstagefright_enc_common \ libstagefright_avc_common \ libstagefright_foundation \ diff --git a/media/libstagefright/AudioPlayer.cpp b/media/libstagefright/AudioPlayer.cpp index 9a9c3ef..2b3cb1a 100644 --- a/media/libstagefright/AudioPlayer.cpp +++ b/media/libstagefright/AudioPlayer.cpp @@ -110,13 +110,18 @@ status_t AudioPlayer::start(bool sourceAlreadyStarted) { success = format->findInt32(kKeySampleRate, &mSampleRate); CHECK(success); - int32_t numChannels; + int32_t numChannels, channelMask; success = format->findInt32(kKeyChannelCount, &numChannels); CHECK(success); + if(!format->findInt32(kKeyChannelMask, &channelMask)) { + ALOGW("source format didn't specify channel mask, using channel order"); + channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER; + } + if 
(mAudioSink.get() != NULL) { status_t err = mAudioSink->open( - mSampleRate, numChannels, AUDIO_FORMAT_PCM_16_BIT, + mSampleRate, numChannels, channelMask, AUDIO_FORMAT_PCM_16_BIT, DEFAULT_AUDIOSINK_BUFFERCOUNT, &AudioPlayer::AudioSinkCallback, this); if (err != OK) { @@ -137,11 +142,15 @@ status_t AudioPlayer::start(bool sourceAlreadyStarted) { mAudioSink->start(); } else { + // playing to an AudioTrack, set up mask if necessary + audio_channel_mask_t audioMask = channelMask == CHANNEL_MASK_USE_CHANNEL_ORDER ? + audio_channel_mask_from_count(numChannels) : channelMask; + if (0 == audioMask) { + return BAD_VALUE; + } + mAudioTrack = new AudioTrack( - AUDIO_STREAM_MUSIC, mSampleRate, AUDIO_FORMAT_PCM_16_BIT, - (numChannels == 2) - ? AUDIO_CHANNEL_OUT_STEREO - : AUDIO_CHANNEL_OUT_MONO, + AUDIO_STREAM_MUSIC, mSampleRate, AUDIO_FORMAT_PCM_16_BIT, audioMask, 0, 0, &AudioCallback, this, 0); if ((err = mAudioTrack->initCheck()) != OK) { @@ -268,6 +277,16 @@ bool AudioPlayer::reachedEOS(status_t *finalStatus) { return mReachedEOS; } +status_t AudioPlayer::setPlaybackRatePermille(int32_t ratePermille) { + if (mAudioSink.get() != NULL) { + return mAudioSink->setPlaybackRatePermille(ratePermille); + } else if (mAudioTrack != NULL){ + return mAudioTrack->setSampleRate(ratePermille * mSampleRate / 1000); + } else { + return NO_INIT; + } +} + // static size_t AudioPlayer::AudioSinkCallback( MediaPlayerBase::AudioSink *audioSink, @@ -408,12 +427,21 @@ size_t AudioPlayer::fillBuffer(void *data, size_t size) { break; } + if (mAudioSink != NULL) { + mLatencyUs = (int64_t)mAudioSink->latency() * 1000; + } else { + mLatencyUs = (int64_t)mAudioTrack->latency() * 1000; + } + CHECK(mInputBuffer->meta_data()->findInt64( kKeyTime, &mPositionTimeMediaUs)); mPositionTimeRealUs = - ((mNumFramesPlayed + size_done / mFrameSize) * 1000000) + -mLatencyUs + ((mNumFramesPlayed + size_done / mFrameSize) * 1000000) / mSampleRate; + if (mPositionTimeRealUs < 0) { + mPositionTimeRealUs = 0; + } 
ALOGV("buffer->size() = %d, " "mPositionTimeMediaUs=%.2f mPositionTimeRealUs=%.2f", @@ -468,7 +496,9 @@ int64_t AudioPlayer::getRealTimeUs() { int64_t AudioPlayer::getRealTimeUsLocked() const { CHECK(mStarted); CHECK_NE(mSampleRate, 0); - return -mLatencyUs + (mNumFramesPlayed * 1000000) / mSampleRate; + int64_t t = -mLatencyUs + (mNumFramesPlayed * 1000000) / mSampleRate; + if (t < 0) return 0; + return t; } int64_t AudioPlayer::getMediaTimeUs() { diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp index 2172cc0..5b2ea1f 100644 --- a/media/libstagefright/AudioSource.cpp +++ b/media/libstagefright/AudioSource.cpp @@ -47,7 +47,7 @@ static void AudioRecordCallbackFunction(int event, void *user, void *info) { } AudioSource::AudioSource( - int inputSource, uint32_t sampleRate, uint32_t channels) + audio_source_t inputSource, uint32_t sampleRate, uint32_t channels) : mStarted(false), mSampleRate(sampleRate), mPrevSampleTimeUs(0), @@ -72,7 +72,7 @@ AudioSource::AudioSource( AudioSource::~AudioSource() { if (mStarted) { - stop(); + reset(); } delete mRecord; @@ -130,7 +130,7 @@ void AudioSource::waitOutstandingEncodingFrames_l() { } } -status_t AudioSource::stop() { +status_t AudioSource::reset() { Mutex::Autolock autoLock(mLock); if (!mStarted) { return UNKNOWN_ERROR; @@ -282,8 +282,6 @@ status_t AudioSource::dataCallbackTimestamp( mPrevSampleTimeUs = mStartTimeUs; } - int64_t timestampUs = mPrevSampleTimeUs; - size_t numLostBytes = 0; if (mNumFramesReceived > 0) { // Ignore earlier frame lost // getInputFramesLost() returns the number of lost frames. 
@@ -293,37 +291,58 @@ status_t AudioSource::dataCallbackTimestamp( CHECK_EQ(numLostBytes & 1, 0u); CHECK_EQ(audioBuffer.size & 1, 0u); - size_t bufferSize = numLostBytes + audioBuffer.size; - MediaBuffer *buffer = new MediaBuffer(bufferSize); if (numLostBytes > 0) { - memset(buffer->data(), 0, numLostBytes); - memcpy((uint8_t *) buffer->data() + numLostBytes, - audioBuffer.i16, audioBuffer.size); - } else { - if (audioBuffer.size == 0) { - ALOGW("Nothing is available from AudioRecord callback buffer"); - buffer->release(); - return OK; + // Loss of audio frames should happen rarely; thus the LOGW should + // not cause a logging spam + ALOGW("Lost audio record data: %d bytes", numLostBytes); + } + + while (numLostBytes > 0) { + size_t bufferSize = numLostBytes; + if (numLostBytes > kMaxBufferSize) { + numLostBytes -= kMaxBufferSize; + bufferSize = kMaxBufferSize; + } else { + numLostBytes = 0; } - memcpy((uint8_t *) buffer->data(), - audioBuffer.i16, audioBuffer.size); + MediaBuffer *lostAudioBuffer = new MediaBuffer(bufferSize); + memset(lostAudioBuffer->data(), 0, bufferSize); + lostAudioBuffer->set_range(0, bufferSize); + queueInputBuffer_l(lostAudioBuffer, timeUs); + } + + if (audioBuffer.size == 0) { + ALOGW("Nothing is available from AudioRecord callback buffer"); + return OK; } + const size_t bufferSize = audioBuffer.size; + MediaBuffer *buffer = new MediaBuffer(bufferSize); + memcpy((uint8_t *) buffer->data(), + audioBuffer.i16, audioBuffer.size); buffer->set_range(0, bufferSize); - timestampUs += ((1000000LL * (bufferSize >> 1)) + - (mSampleRate >> 1)) / mSampleRate; + queueInputBuffer_l(buffer, timeUs); + return OK; +} + +void AudioSource::queueInputBuffer_l(MediaBuffer *buffer, int64_t timeUs) { + const size_t bufferSize = buffer->range_length(); + const size_t frameSize = mRecord->frameSize(); + const int64_t timestampUs = + mPrevSampleTimeUs + + ((1000000LL * (bufferSize / frameSize)) + + (mSampleRate >> 1)) / mSampleRate; if (mNumFramesReceived == 0) { 
buffer->meta_data()->setInt64(kKeyAnchorTime, mStartTimeUs); } + buffer->meta_data()->setInt64(kKeyTime, mPrevSampleTimeUs); buffer->meta_data()->setInt64(kKeyDriftTime, timeUs - mInitialReadTimeUs); mPrevSampleTimeUs = timestampUs; - mNumFramesReceived += buffer->range_length() / sizeof(int16_t); + mNumFramesReceived += bufferSize / frameSize; mBuffersReceived.push_back(buffer); mFrameAvailableCondition.signal(); - - return OK; } void AudioSource::trackMaxAmplitude(int16_t *data, int nSamples) { diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp index d0cb7ff..9e00bb3 100644 --- a/media/libstagefright/AwesomePlayer.cpp +++ b/media/libstagefright/AwesomePlayer.cpp @@ -30,13 +30,12 @@ #include "include/MPEG2TSExtractor.h" #include "include/WVMExtractor.h" -#include "timedtext/TimedTextPlayer.h" - #include <binder/IPCThreadState.h> #include <binder/IServiceManager.h> #include <media/IMediaPlayerService.h> #include <media/stagefright/foundation/hexdump.h> #include <media/stagefright/foundation/ADebug.h> +#include <media/stagefright/timedtext/TimedTextDriver.h> #include <media/stagefright/AudioPlayer.h> #include <media/stagefright/DataSource.h> #include <media/stagefright/FileSource.h> @@ -47,10 +46,8 @@ #include <media/stagefright/MetaData.h> #include <media/stagefright/OMXCodec.h> -#include <surfaceflinger/Surface.h> #include <gui/ISurfaceTexture.h> #include <gui/SurfaceTextureClient.h> -#include <surfaceflinger/ISurfaceComposer.h> #include <media/stagefright/foundation/AMessage.h> @@ -192,7 +189,7 @@ AwesomePlayer::AwesomePlayer() mVideoBuffer(NULL), mDecryptHandle(NULL), mLastVideoTimeUs(-1), - mTextPlayer(NULL) { + mTextDriver(NULL) { CHECK_EQ(mClient.connect(), (status_t)OK); DataSource::RegisterDefaultSniffers(); @@ -335,6 +332,14 @@ status_t AwesomePlayer::setDataSource_l( return UNKNOWN_ERROR; } + if (extractor->getDrmFlag()) { + checkDrmStatus(dataSource); + } + + return setDataSource_l(extractor); +} + +void 
AwesomePlayer::checkDrmStatus(const sp<DataSource>& dataSource) { dataSource->getDrmInfo(mDecryptHandle, &mDrmManagerClient); if (mDecryptHandle != NULL) { CHECK(mDrmManagerClient); @@ -342,8 +347,6 @@ status_t AwesomePlayer::setDataSource_l( notifyListener_l(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, ERROR_DRM_NO_LICENSE); } } - - return setDataSource_l(extractor); } status_t AwesomePlayer::setDataSource_l(const sp<MediaExtractor> &extractor) { @@ -524,9 +527,9 @@ void AwesomePlayer::reset_l() { delete mAudioPlayer; mAudioPlayer = NULL; - if (mTextPlayer != NULL) { - delete mTextPlayer; - mTextPlayer = NULL; + if (mTextDriver != NULL) { + delete mTextDriver; + mTextDriver = NULL; } mVideoRenderer.clear(); @@ -1112,7 +1115,7 @@ status_t AwesomePlayer::pause_l(bool at_eos) { } if (mFlags & TEXTPLAYER_STARTED) { - mTextPlayer->pause(); + mTextDriver->pause(); modifyFlags(TEXT_RUNNING, CLEAR); } @@ -1266,9 +1269,9 @@ status_t AwesomePlayer::seekTo(int64_t timeUs) { } status_t AwesomePlayer::setTimedTextTrackIndex(int32_t index) { - if (mTextPlayer != NULL) { + if (mTextDriver != NULL) { if (index >= 0) { // to turn on a text track - status_t err = mTextPlayer->setTimedTextTrackIndex(index); + status_t err = mTextDriver->setTimedTextTrackIndex(index); if (err != OK) { return err; } @@ -1284,7 +1287,7 @@ status_t AwesomePlayer::setTimedTextTrackIndex(int32_t index) { modifyFlags(TEXTPLAYER_STARTED, CLEAR); } - return mTextPlayer->setTimedTextTrackIndex(index); + return mTextDriver->setTimedTextTrackIndex(index); } } else { return INVALID_OPERATION; @@ -1313,7 +1316,7 @@ status_t AwesomePlayer::seekTo_l(int64_t timeUs) { seekAudioIfNecessary_l(); if (mFlags & TEXTPLAYER_STARTED) { - mTextPlayer->seekTo(mSeekTimeUs); + mTextDriver->seekToAsync(mSeekTimeUs); } if (!(mFlags & PLAYING)) { @@ -1354,15 +1357,15 @@ void AwesomePlayer::setAudioSource(sp<MediaSource> source) { mAudioTrack = source; } -void AwesomePlayer::addTextSource(sp<MediaSource> source) { +void 
AwesomePlayer::addTextSource(const sp<MediaSource>& source) { Mutex::Autolock autoLock(mTimedTextLock); CHECK(source != NULL); - if (mTextPlayer == NULL) { - mTextPlayer = new TimedTextPlayer(this, mListener, &mQueue); + if (mTextDriver == NULL) { + mTextDriver = new TimedTextDriver(mListener); } - mTextPlayer->addTextSource(source); + mTextDriver->addInBandTextSource(source); } status_t AwesomePlayer::initAudioDecoder() { @@ -1603,7 +1606,7 @@ void AwesomePlayer::onVideoEvent() { mSeekTimeUs, mSeeking == SEEK_VIDEO_ONLY ? MediaSource::ReadOptions::SEEK_NEXT_SYNC - : MediaSource::ReadOptions::SEEK_CLOSEST_SYNC); + : MediaSource::ReadOptions::SEEK_CLOSEST); } for (;;) { status_t err = mVideoSource->read(&mVideoBuffer, &options); @@ -1689,7 +1692,7 @@ void AwesomePlayer::onVideoEvent() { } if ((mFlags & TEXTPLAYER_STARTED) && !(mFlags & (TEXT_RUNNING | SEEK_PREVIEW))) { - mTextPlayer->resume(); + mTextDriver->resume(); modifyFlags(TEXT_RUNNING, SET); } @@ -2095,7 +2098,7 @@ status_t AwesomePlayer::finishSetDataSource_l() { String8 mimeType; float confidence; sp<AMessage> dummy; - bool success = SniffDRM(dataSource, &mimeType, &confidence, &dummy); + bool success = SniffWVM(dataSource, &mimeType, &confidence, &dummy); if (!success || strcasecmp( @@ -2105,6 +2108,8 @@ status_t AwesomePlayer::finishSetDataSource_l() { mWVMExtractor = new WVMExtractor(dataSource); mWVMExtractor->setAdaptiveStreamingMode(true); + if (mUIDValid) + mWVMExtractor->setUID(mUID); extractor = mWVMExtractor; } else { extractor = MediaExtractor::Create( @@ -2115,13 +2120,8 @@ status_t AwesomePlayer::finishSetDataSource_l() { } } - dataSource->getDrmInfo(mDecryptHandle, &mDrmManagerClient); - - if (mDecryptHandle != NULL) { - CHECK(mDrmManagerClient); - if (RightsStatus::RIGHTS_VALID != mDecryptHandle->status) { - notifyListener_l(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, ERROR_DRM_NO_LICENSE); - } + if (extractor->getDrmFlag()) { + checkDrmStatus(dataSource); } status_t err = setDataSource_l(extractor); 
@@ -2240,16 +2240,24 @@ status_t AwesomePlayer::setParameter(int key, const Parcel &request) { case KEY_PARAMETER_TIMED_TEXT_ADD_OUT_OF_BAND_SOURCE: { Mutex::Autolock autoLock(mTimedTextLock); - if (mTextPlayer == NULL) { - mTextPlayer = new TimedTextPlayer(this, mListener, &mQueue); + if (mTextDriver == NULL) { + mTextDriver = new TimedTextDriver(mListener); } - return mTextPlayer->setParameter(key, request); + return mTextDriver->addOutOfBandTextSource(request); } case KEY_PARAMETER_CACHE_STAT_COLLECT_FREQ_MS: { return setCacheStatCollectFreq(request); } + case KEY_PARAMETER_PLAYBACK_RATE_PERMILLE: + { + if (mAudioPlayer != NULL) { + return mAudioPlayer->setPlaybackRatePermille(request.readInt32()); + } else { + return NO_INIT; + } + } default: { return ERROR_UNSUPPORTED; diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp index 1850c9c..2df5528 100755 --- a/media/libstagefright/CameraSource.cpp +++ b/media/libstagefright/CameraSource.cpp @@ -20,14 +20,14 @@ #include <OMX_Component.h> #include <binder/IPCThreadState.h> +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/CameraSource.h> -#include <media/stagefright/MediaDebug.h> #include <media/stagefright/MediaDefs.h> #include <media/stagefright/MediaErrors.h> #include <media/stagefright/MetaData.h> #include <camera/Camera.h> #include <camera/CameraParameters.h> -#include <surfaceflinger/Surface.h> +#include <gui/Surface.h> #include <utils/String8.h> #include <cutils/properties.h> @@ -114,7 +114,7 @@ static int32_t getColorFormat(const char* colorFormat) { ALOGE("Uknown color format (%s), please add it to " "CameraSource::getColorFormat", colorFormat); - CHECK_EQ(0, "Unknown color format"); + CHECK(!"Unknown color format"); } CameraSource *CameraSource::Create() { @@ -517,7 +517,7 @@ status_t CameraSource::initWithCameraAccess( // This CHECK is good, since we just passed the lock/unlock // check earlier by calling mCamera->setParameters(). 
- CHECK_EQ(OK, mCamera->setPreviewDisplay(mSurface)); + CHECK_EQ((status_t)OK, mCamera->setPreviewDisplay(mSurface)); // By default, do not store metadata in video buffers mIsMetaDataStoredInVideoBuffers = false; @@ -548,7 +548,7 @@ status_t CameraSource::initWithCameraAccess( CameraSource::~CameraSource() { if (mStarted) { - stop(); + reset(); } else if (mInitCheck == OK) { // Camera is initialized but because start() is never called, // the lock on Camera is never released(). This makes sure @@ -566,7 +566,8 @@ void CameraSource::startCameraRecording() { if (mCameraFlags & FLAGS_HOT_CAMERA) { mCamera->unlock(); mCamera.clear(); - CHECK_EQ(OK, mCameraRecordingProxy->startRecording(new ProxyListener(this))); + CHECK_EQ((status_t)OK, + mCameraRecordingProxy->startRecording(new ProxyListener(this))); } else { mCamera->setListener(new CameraSourceListener(this)); mCamera->startRecording(); @@ -632,8 +633,8 @@ void CameraSource::releaseCamera() { mCameraFlags = 0; } -status_t CameraSource::stop() { - ALOGD("stop: E"); +status_t CameraSource::reset() { + ALOGD("reset: E"); Mutex::Autolock autoLock(mLock); mStarted = false; mFrameAvailableCondition.signal(); @@ -670,7 +671,7 @@ status_t CameraSource::stop() { } CHECK_EQ(mNumFramesReceived, mNumFramesEncoded + mNumFramesDropped); - ALOGD("stop: X"); + ALOGD("reset: X"); return OK; } @@ -718,7 +719,7 @@ void CameraSource::signalBufferReturned(MediaBuffer *buffer) { return; } } - CHECK_EQ(0, "signalBufferReturned: bogus buffer"); + CHECK(!"signalBufferReturned: bogus buffer"); } status_t CameraSource::read( diff --git a/media/libstagefright/CameraSourceTimeLapse.cpp b/media/libstagefright/CameraSourceTimeLapse.cpp index 263ab50..26ce7ae 100644 --- a/media/libstagefright/CameraSourceTimeLapse.cpp +++ b/media/libstagefright/CameraSourceTimeLapse.cpp @@ -20,9 +20,9 @@ #include <binder/IPCThreadState.h> #include <binder/MemoryBase.h> #include <binder/MemoryHeapBase.h> +#include <media/stagefright/foundation/ADebug.h> #include 
<media/stagefright/CameraSource.h> #include <media/stagefright/CameraSourceTimeLapse.h> -#include <media/stagefright/MediaDebug.h> #include <media/stagefright/MetaData.h> #include <camera/Camera.h> #include <camera/CameraParameters.h> @@ -87,6 +87,10 @@ CameraSourceTimeLapse::CameraSourceTimeLapse( } CameraSourceTimeLapse::~CameraSourceTimeLapse() { + if (mLastReadBufferCopy) { + mLastReadBufferCopy->release(); + mLastReadBufferCopy = NULL; + } } void CameraSourceTimeLapse::startQuickReadReturns() { @@ -204,15 +208,6 @@ status_t CameraSourceTimeLapse::read( } } -void CameraSourceTimeLapse::stopCameraRecording() { - ALOGV("stopCameraRecording"); - CameraSource::stopCameraRecording(); - if (mLastReadBufferCopy) { - mLastReadBufferCopy->release(); - mLastReadBufferCopy = NULL; - } -} - sp<IMemory> CameraSourceTimeLapse::createIMemoryCopy( const sp<IMemory> &source_data) { diff --git a/media/libstagefright/DRMExtractor.cpp b/media/libstagefright/DRMExtractor.cpp index 9452ab1..524c3aa 100644 --- a/media/libstagefright/DRMExtractor.cpp +++ b/media/libstagefright/DRMExtractor.cpp @@ -23,6 +23,7 @@ #include <arpa/inet.h> #include <utils/String8.h> +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/Utils.h> #include <media/stagefright/DataSource.h> #include <media/stagefright/MediaSource.h> @@ -30,7 +31,6 @@ #include <media/stagefright/MetaData.h> #include <media/stagefright/MediaErrors.h> #include <media/stagefright/MediaBuffer.h> -#include <media/stagefright/MediaDebug.h> #include <drm/drm_framework_common.h> #include <utils/Errors.h> @@ -282,13 +282,13 @@ bool SniffDRM( if (decryptHandle != NULL) { if (decryptHandle->decryptApiType == DecryptApiType::CONTAINER_BASED) { *mimeType = String8("drm+container_based+") + decryptHandle->mimeType; + *confidence = 10.0f; } else if (decryptHandle->decryptApiType == DecryptApiType::ELEMENTARY_STREAM_BASED) { *mimeType = String8("drm+es_based+") + decryptHandle->mimeType; - } else if 
(decryptHandle->decryptApiType == DecryptApiType::WV_BASED) { - *mimeType = MEDIA_MIMETYPE_CONTAINER_WVM; - ALOGW("SniffWVM: found match\n"); + *confidence = 10.0f; + } else { + return false; } - *confidence = 10.0f; return true; } diff --git a/media/libstagefright/DataSource.cpp b/media/libstagefright/DataSource.cpp index 43539bb..d0a7880 100644 --- a/media/libstagefright/DataSource.cpp +++ b/media/libstagefright/DataSource.cpp @@ -15,6 +15,12 @@ */ #include "include/AMRExtractor.h" +#include "include/AVIExtractor.h" + +#if CHROMIUM_AVAILABLE +#include "include/DataUriSource.h" +#endif + #include "include/MP3Extractor.h" #include "include/MPEG4Extractor.h" #include "include/WAVExtractor.h" @@ -26,6 +32,7 @@ #include "include/DRMExtractor.h" #include "include/FLACExtractor.h" #include "include/AACExtractor.h" +#include "include/WVMExtractor.h" #include "matroska/MatroskaExtractor.h" @@ -112,7 +119,9 @@ void DataSource::RegisterDefaultSniffers() { RegisterSniffer(SniffMPEG2TS); RegisterSniffer(SniffMP3); RegisterSniffer(SniffAAC); + RegisterSniffer(SniffAVI); RegisterSniffer(SniffMPEG2PS); + RegisterSniffer(SniffWVM); char value[PROPERTY_VALUE_MAX]; if (property_get("drm.service.enabled", value, NULL) @@ -134,6 +143,10 @@ sp<DataSource> DataSource::CreateFromURI( return NULL; } source = new NuCachedSource2(httpSource); +# if CHROMIUM_AVAILABLE + } else if (!strncasecmp("data:", uri, 5)) { + source = new DataUriSource(uri); +#endif } else { // Assume it's a filename. source = new FileSource(uri); diff --git a/media/libstagefright/FileSource.cpp b/media/libstagefright/FileSource.cpp index 73cb48c..73c8d03 100644 --- a/media/libstagefright/FileSource.cpp +++ b/media/libstagefright/FileSource.cpp @@ -14,8 +14,8 @@ * limitations under the License. 
*/ +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/FileSource.h> -#include <media/stagefright/MediaDebug.h> #include <sys/types.h> #include <unistd.h> #include <sys/types.h> @@ -127,7 +127,7 @@ status_t FileSource::getSize(off64_t *size) { return OK; } -sp<DecryptHandle> FileSource::DrmInitialization() { +sp<DecryptHandle> FileSource::DrmInitialization(const char *mime) { if (mDrmManagerClient == NULL) { mDrmManagerClient = new DrmManagerClient(); } @@ -138,7 +138,7 @@ sp<DecryptHandle> FileSource::DrmInitialization() { if (mDecryptHandle == NULL) { mDecryptHandle = mDrmManagerClient->openDecryptSession( - mFd, mOffset, mLength); + mFd, mOffset, mLength, mime); } if (mDecryptHandle == NULL) { diff --git a/media/libstagefright/JPEGSource.cpp b/media/libstagefright/JPEGSource.cpp index e818115..bafa4b2 100644 --- a/media/libstagefright/JPEGSource.cpp +++ b/media/libstagefright/JPEGSource.cpp @@ -18,10 +18,10 @@ #define LOG_TAG "JPEGSource" #include <utils/Log.h> +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/DataSource.h> #include <media/stagefright/JPEGSource.h> #include <media/stagefright/MediaBufferGroup.h> -#include <media/stagefright/MediaDebug.h> #include <media/stagefright/MediaDefs.h> #include <media/stagefright/MediaErrors.h> #include <media/stagefright/MetaData.h> @@ -59,7 +59,7 @@ JPEGSource::JPEGSource(const sp<DataSource> &source) mWidth(0), mHeight(0), mOffset(0) { - CHECK_EQ(parseJPEG(), OK); + CHECK_EQ(parseJPEG(), (status_t)OK); CHECK(mSource->getSize(&mSize) == OK); } diff --git a/media/libstagefright/MPEG2TSWriter.cpp b/media/libstagefright/MPEG2TSWriter.cpp index 36009ab..f702376 100644 --- a/media/libstagefright/MPEG2TSWriter.cpp +++ b/media/libstagefright/MPEG2TSWriter.cpp @@ -244,7 +244,7 @@ void MPEG2TSWriter::SourceInfo::extractCodecSpecificData() { sp<AMessage> notify = mNotify->dup(); notify->setInt32("what", kNotifyBuffer); - notify->setObject("buffer", out); + 
notify->setBuffer("buffer", out); notify->setInt32("oob", true); notify->post(); } @@ -270,7 +270,7 @@ void MPEG2TSWriter::SourceInfo::postAVCFrame(MediaBuffer *buffer) { copy->meta()->setInt32("isSync", true); } - notify->setObject("buffer", copy); + notify->setBuffer("buffer", copy); notify->post(); } @@ -351,7 +351,7 @@ bool MPEG2TSWriter::SourceInfo::flushAACFrames() { sp<AMessage> notify = mNotify->dup(); notify->setInt32("what", kNotifyBuffer); - notify->setObject("buffer", mAACBuffer); + notify->setBuffer("buffer", mAACBuffer); notify->post(); mAACBuffer.clear(); @@ -513,7 +513,7 @@ void MPEG2TSWriter::init() { MPEG2TSWriter::~MPEG2TSWriter() { if (mStarted) { - stop(); + reset(); } mLooper->unregisterHandler(mReflector->id()); @@ -564,7 +564,7 @@ status_t MPEG2TSWriter::start(MetaData *param) { return OK; } -status_t MPEG2TSWriter::stop() { +status_t MPEG2TSWriter::reset() { CHECK(mStarted); for (size_t i = 0; i < mSources.size(); ++i) { @@ -614,10 +614,8 @@ void MPEG2TSWriter::onMessageReceived(const sp<AMessage> &msg) { ++mNumSourcesDone; } else if (what == SourceInfo::kNotifyBuffer) { - sp<RefBase> obj; - CHECK(msg->findObject("buffer", &obj)); - - sp<ABuffer> buffer = static_cast<ABuffer *>(obj.get()); + sp<ABuffer> buffer; + CHECK(msg->findBuffer("buffer", &buffer)); int32_t oob; if (msg->findInt32("oob", &oob) && oob) { diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp index 22bdd95..6c95d4e 100644 --- a/media/libstagefright/MPEG4Extractor.cpp +++ b/media/libstagefright/MPEG4Extractor.cpp @@ -20,7 +20,6 @@ #include "include/MPEG4Extractor.h" #include "include/SampleTable.h" #include "include/ESDS.h" -#include "timedtext/TimedTextPlayer.h" #include <arpa/inet.h> @@ -1372,8 +1371,9 @@ status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) { uint32_t type = ntohl(buffer); // For the 3GPP file format, the handler-type within the 'hdlr' box - // shall be 'text' - if (type == FOURCC('t', 'e', 'x', 't')) 
{ + // shall be 'text'. We also want to support 'sbtl' handler type + // for a practical reason as various MPEG4 containers use it. + if (type == FOURCC('t', 'e', 'x', 't') || type == FOURCC('s', 'b', 't', 'l')) { mLastTrack->meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_TEXT_3GPP); } @@ -2429,4 +2429,3 @@ bool SniffMPEG4( } } // namespace android - diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp index 06dd875..7ebbe1d 100755 --- a/media/libstagefright/MPEG4Writer.cpp +++ b/media/libstagefright/MPEG4Writer.cpp @@ -23,10 +23,10 @@ #include <pthread.h> #include <sys/prctl.h> +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/MPEG4Writer.h> #include <media/stagefright/MediaBuffer.h> #include <media/stagefright/MetaData.h> -#include <media/stagefright/MediaDebug.h> #include <media/stagefright/MediaDefs.h> #include <media/stagefright/MediaErrors.h> #include <media/stagefright/MediaSource.h> @@ -70,6 +70,10 @@ public: status_t dump(int fd, const Vector<String16>& args) const; private: + enum { + kMaxCttsOffsetTimeUs = 1000000LL, // 1 second + }; + MPEG4Writer *mOwner; sp<MetaData> mMeta; sp<MediaSource> mSource; @@ -137,11 +141,12 @@ private: : sampleCount(count), sampleDuration(timescaledDur) {} uint32_t sampleCount; - int32_t sampleDuration; // time scale based + uint32_t sampleDuration; // time scale based }; - bool mHasNegativeCttsDeltaDuration; size_t mNumCttsTableEntries; List<CttsTableEntry> mCttsTableEntries; + int64_t mMinCttsOffsetTimeUs; + int64_t mMaxCttsOffsetTimeUs; // Sequence parameter set or picture parameter set struct AVCParamSet { @@ -172,6 +177,8 @@ private: // Update the audio track's drift information. 
void updateDriftTime(const sp<MetaData>& meta); + int32_t getStartTimeOffsetScaledTime() const; + static void *ThreadWrapper(void *me); status_t threadEntry(); @@ -282,7 +289,7 @@ MPEG4Writer::MPEG4Writer(int fd) } MPEG4Writer::~MPEG4Writer() { - stop(); + reset(); while (!mTracks.empty()) { List<Track *>::iterator it = mTracks.begin(); @@ -471,7 +478,7 @@ status_t MPEG4Writer::start(MetaData *param) { !param->findInt32(kKeyTimeScale, &mTimeScale)) { mTimeScale = 1000; } - CHECK(mTimeScale > 0); + CHECK_GT(mTimeScale, 0); ALOGV("movie time scale: %d", mTimeScale); mStreamableFile = true; @@ -490,7 +497,7 @@ status_t MPEG4Writer::start(MetaData *param) { } mEstimatedMoovBoxSize = estimateMoovBoxSize(bitRate); } - CHECK(mEstimatedMoovBoxSize >= 8); + CHECK_GE(mEstimatedMoovBoxSize, 8); lseek64(mFd, mFreeBoxOffset, SEEK_SET); writeInt32(mEstimatedMoovBoxSize); write("free", 4); @@ -616,7 +623,7 @@ void MPEG4Writer::release() { mStarted = false; } -status_t MPEG4Writer::stop() { +status_t MPEG4Writer::reset() { if (mInitCheck != OK) { return OK; } else { @@ -684,7 +691,7 @@ status_t MPEG4Writer::stop() { mWriteMoovBoxToMemory = false; if (mStreamableFile) { - CHECK(mMoovBoxBufferOffset + 8 <= mEstimatedMoovBoxSize); + CHECK_LE(mMoovBoxBufferOffset + 8, mEstimatedMoovBoxSize); // Moov box lseek64(mFd, mFreeBoxOffset, SEEK_SET); @@ -856,7 +863,7 @@ off64_t MPEG4Writer::addLengthPrefixedSample_l(MediaBuffer *buffer) { mOffset += length + 4; } else { - CHECK(length < 65536); + CHECK_LT(length, 65536); uint8_t x = length >> 8; ::write(mFd, &x, 1); @@ -1085,7 +1092,7 @@ bool MPEG4Writer::reachedEOS() { void MPEG4Writer::setStartTimestampUs(int64_t timeUs) { ALOGI("setStartTimestampUs: %lld", timeUs); - CHECK(timeUs >= 0); + CHECK_GE(timeUs, 0ll); Mutex::Autolock autoLock(mLock); if (mStartTimestampUs < 0 || mStartTimestampUs > timeUs) { mStartTimestampUs = timeUs; @@ -1186,9 +1193,6 @@ void MPEG4Writer::Track::addOneCttsTableEntry( if (mIsAudio) { return; } - if (duration < 
0 && !mHasNegativeCttsDeltaDuration) { - mHasNegativeCttsDeltaDuration = true; - } CttsTableEntry cttsEntry(sampleCount, duration); mCttsTableEntries.push_back(cttsEntry); ++mNumCttsTableEntries; @@ -1218,7 +1222,7 @@ void MPEG4Writer::Track::setTimeScale() { mTimeScale = timeScale; } - CHECK(mTimeScale > 0); + CHECK_GT(mTimeScale, 0); } void MPEG4Writer::Track::getCodecSpecificDataFromInputFormatIfPossible() { @@ -1299,7 +1303,7 @@ void MPEG4Writer::bufferChunk(const Chunk& chunk) { } } - CHECK("Received a chunk for a unknown track" == 0); + CHECK(!"Received a chunk for a unknown track"); } void MPEG4Writer::writeChunkToFile(Chunk* chunk) { @@ -1509,7 +1513,6 @@ status_t MPEG4Writer::Track::start(MetaData *params) { mMdatSizeBytes = 0; mMaxChunkDurationUs = 0; - mHasNegativeCttsDeltaDuration = false; pthread_create(&mThread, &attr, ThreadWrapper, this); pthread_attr_destroy(&attr); @@ -1833,29 +1836,18 @@ status_t MPEG4Writer::Track::threadEntry() { int32_t nChunks = 0; int32_t nZeroLengthFrames = 0; int64_t lastTimestampUs = 0; // Previous sample time stamp - int64_t lastCttsTimeUs = 0; // Previous sample time stamp int64_t lastDurationUs = 0; // Between the previous two samples int64_t currDurationTicks = 0; // Timescale based ticks int64_t lastDurationTicks = 0; // Timescale based ticks int32_t sampleCount = 1; // Sample count in the current stts table entry - int64_t currCttsDurTicks = 0; // Timescale based ticks - int64_t lastCttsDurTicks = 0; // Timescale based ticks - int32_t cttsSampleCount = 1; // Sample count in the current ctts table entry - uint32_t previousSampleSize = 0; // Size of the previous sample + uint32_t previousSampleSize = 0; // Size of the previous sample int64_t previousPausedDurationUs = 0; int64_t timestampUs = 0; - int64_t cttsDeltaTimeUs = 0; - bool hasBFrames = false; + int64_t cttsOffsetTimeUs = 0; + int64_t currCttsOffsetTimeTicks = 0; // Timescale based ticks + int64_t lastCttsOffsetTimeTicks = -1; // Timescale based ticks + 
int32_t cttsSampleCount = 0; // Sample count in the current ctts table entry -#if 1 - // XXX: Samsung's video encoder's output buffer timestamp - // is not correct. see bug 4724339 - char value[PROPERTY_VALUE_MAX]; - if (property_get("rw.media.record.hasb", value, NULL) && - (!strcasecmp(value, "true") || !strcasecmp(value, "1"))) { - hasBFrames = true; - } -#endif if (mIsAudio) { prctl(PR_SET_NAME, (unsigned long)"AudioTrackEncoding", 0, 0, 0); } else { @@ -1897,7 +1889,7 @@ status_t MPEG4Writer::Track::threadEntry() { (const uint8_t *)buffer->data() + buffer->range_offset(), buffer->range_length()); - CHECK_EQ(OK, err); + CHECK_EQ((status_t)OK, err); } else if (mIsMPEG4) { mCodecSpecificDataSize = buffer->range_length(); mCodecSpecificData = malloc(mCodecSpecificDataSize); @@ -1963,32 +1955,64 @@ status_t MPEG4Writer::Track::threadEntry() { if (mResumed) { int64_t durExcludingEarlierPausesUs = timestampUs - previousPausedDurationUs; - CHECK(durExcludingEarlierPausesUs >= 0); + CHECK_GE(durExcludingEarlierPausesUs, 0ll); int64_t pausedDurationUs = durExcludingEarlierPausesUs - mTrackDurationUs; - CHECK(pausedDurationUs >= lastDurationUs); + CHECK_GE(pausedDurationUs, lastDurationUs); previousPausedDurationUs += pausedDurationUs - lastDurationUs; mResumed = false; } timestampUs -= previousPausedDurationUs; - CHECK(timestampUs >= 0); - if (!mIsAudio && hasBFrames) { + CHECK_GE(timestampUs, 0ll); + if (!mIsAudio) { /* * Composition time: timestampUs * Decoding time: decodingTimeUs - * Composition time delta = composition time - decoding time - * - * We save picture decoding time stamp delta in stts table entries, - * and composition time delta duration in ctts table entries. 
+ * Composition time offset = composition time - decoding time */ int64_t decodingTimeUs; CHECK(meta_data->findInt64(kKeyDecodingTime, &decodingTimeUs)); decodingTimeUs -= previousPausedDurationUs; - int64_t timeUs = decodingTimeUs; - cttsDeltaTimeUs = timestampUs - decodingTimeUs; + cttsOffsetTimeUs = + timestampUs + kMaxCttsOffsetTimeUs - decodingTimeUs; + CHECK_GE(cttsOffsetTimeUs, 0ll); timestampUs = decodingTimeUs; - ALOGV("decoding time: %lld and ctts delta time: %lld", - timestampUs, cttsDeltaTimeUs); + ALOGV("decoding time: %lld and ctts offset time: %lld", + timestampUs, cttsOffsetTimeUs); + + // Update ctts box table if necessary + currCttsOffsetTimeTicks = + (cttsOffsetTimeUs * mTimeScale + 500000LL) / 1000000LL; + CHECK_LE(currCttsOffsetTimeTicks, 0x0FFFFFFFFLL); + if (mNumSamples == 0) { + // Force the first ctts table entry to have one single entry + // so that we can do adjustment for the initial track start + // time offset easily in writeCttsBox(). + lastCttsOffsetTimeTicks = currCttsOffsetTimeTicks; + addOneCttsTableEntry(1, currCttsOffsetTimeTicks); + cttsSampleCount = 0; // No sample in ctts box is pending + } else { + if (currCttsOffsetTimeTicks != lastCttsOffsetTimeTicks) { + addOneCttsTableEntry(cttsSampleCount, lastCttsOffsetTimeTicks); + lastCttsOffsetTimeTicks = currCttsOffsetTimeTicks; + cttsSampleCount = 1; // One sample in ctts box is pending + } else { + ++cttsSampleCount; + } + } + + // Update ctts time offset range + if (mNumSamples == 0) { + mMinCttsOffsetTimeUs = currCttsOffsetTimeTicks; + mMaxCttsOffsetTimeUs = currCttsOffsetTimeTicks; + } else { + if (currCttsOffsetTimeTicks > mMaxCttsOffsetTimeUs) { + mMaxCttsOffsetTimeUs = currCttsOffsetTimeTicks; + } else if (currCttsOffsetTimeTicks < mMinCttsOffsetTimeUs) { + mMinCttsOffsetTimeUs = currCttsOffsetTimeTicks; + } + } + } if (mIsRealTimeRecording) { @@ -1997,7 +2021,7 @@ status_t MPEG4Writer::Track::threadEntry() { } } - CHECK(timestampUs >= 0); + CHECK_GE(timestampUs, 0ll); 
ALOGV("%s media time stamp: %lld and previous paused duration %lld", mIsAudio? "Audio": "Video", timestampUs, previousPausedDurationUs); if (timestampUs > mTrackDurationUs) { @@ -2012,6 +2036,7 @@ status_t MPEG4Writer::Track::threadEntry() { currDurationTicks = ((timestampUs * mTimeScale + 500000LL) / 1000000LL - (lastTimestampUs * mTimeScale + 500000LL) / 1000000LL); + CHECK_GE(currDurationTicks, 0ll); mSampleSizes.push_back(sampleSize); ++mNumSamples; @@ -2020,25 +2045,12 @@ status_t MPEG4Writer::Track::threadEntry() { // Force the first sample to have its own stts entry so that // we can adjust its value later to maintain the A/V sync. if (mNumSamples == 3 || currDurationTicks != lastDurationTicks) { - ALOGV("%s lastDurationUs: %lld us, currDurationTicks: %lld us", - mIsAudio? "Audio": "Video", lastDurationUs, currDurationTicks); addOneSttsTableEntry(sampleCount, lastDurationTicks); sampleCount = 1; } else { ++sampleCount; } - if (!mIsAudio) { - currCttsDurTicks = - ((cttsDeltaTimeUs * mTimeScale + 500000LL) / 1000000LL - - (lastCttsTimeUs * mTimeScale + 500000LL) / 1000000LL); - if (currCttsDurTicks != lastCttsDurTicks) { - addOneCttsTableEntry(cttsSampleCount, lastCttsDurTicks); - cttsSampleCount = 1; - } else { - ++cttsSampleCount; - } - } } if (mSamplesHaveSameSize) { if (mNumSamples >= 2 && previousSampleSize != sampleSize) { @@ -2052,11 +2064,6 @@ status_t MPEG4Writer::Track::threadEntry() { lastDurationTicks = currDurationTicks; lastTimestampUs = timestampUs; - if (!mIsAudio) { - lastCttsDurTicks = currCttsDurTicks; - lastCttsTimeUs = cttsDeltaTimeUs; - } - if (isSync != 0) { addOneStssTableEntry(mNumSamples); } @@ -2125,10 +2132,8 @@ status_t MPEG4Writer::Track::threadEntry() { if (mNumSamples == 1) { lastDurationUs = 0; // A single sample's duration lastDurationTicks = 0; - lastCttsDurTicks = 0; } else { ++sampleCount; // Count for the last sample - ++cttsSampleCount; } if (mNumSamples <= 2) { @@ -2140,7 +2145,14 @@ status_t 
MPEG4Writer::Track::threadEntry() { addOneSttsTableEntry(sampleCount, lastDurationTicks); } - addOneCttsTableEntry(cttsSampleCount, lastCttsDurTicks); + // The last ctts box may not have been written yet, and this + // is to make sure that we write out the last ctts box. + if (currCttsOffsetTimeTicks == lastCttsOffsetTimeTicks) { + if (cttsSampleCount > 0) { + addOneCttsTableEntry(cttsSampleCount, lastCttsOffsetTimeTicks); + } + } + mTrackDurationUs += lastDurationUs; mReachedEOS = true; @@ -2406,7 +2418,7 @@ void MPEG4Writer::Track::writeVideoFourCCBox() { mOwner->writeInt16(0x18); // depth mOwner->writeInt16(-1); // predefined - CHECK(23 + mCodecSpecificDataSize < 128); + CHECK_LT(23 + mCodecSpecificDataSize, 128); if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG4, mime)) { writeMp4vEsdsBox(); @@ -2465,10 +2477,10 @@ void MPEG4Writer::Track::writeAudioFourCCBox() { void MPEG4Writer::Track::writeMp4aEsdsBox() { mOwner->beginBox("esds"); CHECK(mCodecSpecificData); - CHECK(mCodecSpecificDataSize > 0); + CHECK_GT(mCodecSpecificDataSize, 0); // Make sure all sizes encode to a single byte. - CHECK(mCodecSpecificDataSize + 23 < 128); + CHECK_LT(mCodecSpecificDataSize + 23, 128); mOwner->writeInt32(0); // version=0, flags=0 mOwner->writeInt8(0x03); // ES_DescrTag @@ -2502,7 +2514,7 @@ void MPEG4Writer::Track::writeMp4aEsdsBox() { void MPEG4Writer::Track::writeMp4vEsdsBox() { CHECK(mCodecSpecificData); - CHECK(mCodecSpecificDataSize > 0); + CHECK_GT(mCodecSpecificDataSize, 0); mOwner->beginBox("esds"); mOwner->writeInt32(0); // version=0, flags=0 @@ -2662,7 +2674,7 @@ void MPEG4Writer::Track::writeDinfBox() { void MPEG4Writer::Track::writeAvccBox() { CHECK(mCodecSpecificData); - CHECK(mCodecSpecificDataSize >= 5); + CHECK_GE(mCodecSpecificDataSize, 5); // Patch avcc's lengthSize field to match the number // of bytes we use to indicate the size of a nal unit. 
@@ -2690,23 +2702,26 @@ void MPEG4Writer::Track::writePaspBox() { mOwner->endBox(); // pasp } +int32_t MPEG4Writer::Track::getStartTimeOffsetScaledTime() const { + int64_t trackStartTimeOffsetUs = 0; + int64_t moovStartTimeUs = mOwner->getStartTimestampUs(); + if (mStartTimestampUs != moovStartTimeUs) { + CHECK_GT(mStartTimestampUs, moovStartTimeUs); + trackStartTimeOffsetUs = mStartTimestampUs - moovStartTimeUs; + } + return (trackStartTimeOffsetUs * mTimeScale + 500000LL) / 1000000LL; +} + void MPEG4Writer::Track::writeSttsBox() { mOwner->beginBox("stts"); mOwner->writeInt32(0); // version=0, flags=0 mOwner->writeInt32(mNumSttsTableEntries); // Compensate for small start time difference from different media tracks - int64_t trackStartTimeOffsetUs = 0; - int64_t moovStartTimeUs = mOwner->getStartTimestampUs(); - if (mStartTimestampUs != moovStartTimeUs) { - CHECK(mStartTimestampUs > moovStartTimeUs); - trackStartTimeOffsetUs = mStartTimestampUs - moovStartTimeUs; - } List<SttsTableEntry>::iterator it = mSttsTableEntries.begin(); CHECK(it != mSttsTableEntries.end() && it->sampleCount == 1); mOwner->writeInt32(it->sampleCount); - int32_t dur = (trackStartTimeOffsetUs * mTimeScale + 500000LL) / 1000000LL; - mOwner->writeInt32(dur + it->sampleDuration); + mOwner->writeInt32(getStartTimeOffsetScaledTime() + it->sampleDuration); int64_t totalCount = 1; while (++it != mSttsTableEntries.end()) { @@ -2714,7 +2729,7 @@ void MPEG4Writer::Track::writeSttsBox() { mOwner->writeInt32(it->sampleDuration); totalCount += it->sampleCount; } - CHECK(totalCount == mNumSamples); + CHECK_EQ(totalCount, mNumSamples); mOwner->endBox(); // stts } @@ -2723,6 +2738,11 @@ void MPEG4Writer::Track::writeCttsBox() { return; } + // There is no B frame at all + if (mMinCttsOffsetTimeUs == mMaxCttsOffsetTimeUs) { + return; + } + // Do not write ctts box when there is no need to have it. 
if ((mNumCttsTableEntries == 1 && mCttsTableEntries.begin()->sampleDuration == 0) || @@ -2730,24 +2750,29 @@ void MPEG4Writer::Track::writeCttsBox() { return; } - ALOGV("ctts box has %d entries", mNumCttsTableEntries); + ALOGD("ctts box has %d entries with range [%lld, %lld]", + mNumCttsTableEntries, mMinCttsOffsetTimeUs, mMaxCttsOffsetTimeUs); mOwner->beginBox("ctts"); - if (mHasNegativeCttsDeltaDuration) { - mOwner->writeInt32(0x00010000); // version=1, flags=0 - } else { - mOwner->writeInt32(0); // version=0, flags=0 - } + // Version 1 allows to use negative offset time value, but + // we are sticking to version 0 for now. + mOwner->writeInt32(0); // version=0, flags=0 mOwner->writeInt32(mNumCttsTableEntries); - int64_t totalCount = 0; - for (List<CttsTableEntry>::iterator it = mCttsTableEntries.begin(); - it != mCttsTableEntries.end(); ++it) { + // Compensate for small start time difference from different media tracks + List<CttsTableEntry>::iterator it = mCttsTableEntries.begin(); + CHECK(it != mCttsTableEntries.end() && it->sampleCount == 1); + mOwner->writeInt32(it->sampleCount); + mOwner->writeInt32(getStartTimeOffsetScaledTime() + + it->sampleDuration - mMinCttsOffsetTimeUs); + + int64_t totalCount = 1; + while (++it != mCttsTableEntries.end()) { mOwner->writeInt32(it->sampleCount); - mOwner->writeInt32(it->sampleDuration); + mOwner->writeInt32(it->sampleDuration - mMinCttsOffsetTimeUs); totalCount += it->sampleCount; } - CHECK(totalCount == mNumSamples); + CHECK_EQ(totalCount, mNumSamples); mOwner->endBox(); // ctts } diff --git a/media/libstagefright/MediaBuffer.cpp b/media/libstagefright/MediaBuffer.cpp index 96271e4..11b80bf 100644 --- a/media/libstagefright/MediaBuffer.cpp +++ b/media/libstagefright/MediaBuffer.cpp @@ -22,8 +22,8 @@ #include <stdlib.h> #include <media/stagefright/foundation/ABuffer.h> +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/MediaBuffer.h> -#include <media/stagefright/MediaDebug.h> #include 
<media/stagefright/MetaData.h> #include <ui/GraphicBuffer.h> @@ -157,7 +157,7 @@ void MediaBuffer::reset() { } MediaBuffer::~MediaBuffer() { - CHECK_EQ(mObserver, NULL); + CHECK(mObserver == NULL); if (mOwnsData && mData != NULL) { free(mData); @@ -188,7 +188,7 @@ int MediaBuffer::refcount() const { } MediaBuffer *MediaBuffer::clone() { - CHECK_EQ(mGraphicBuffer, NULL); + CHECK(mGraphicBuffer == NULL); MediaBuffer *buffer = new MediaBuffer(mData, mSize); buffer->set_range(mRangeOffset, mRangeLength); diff --git a/media/libstagefright/MediaBufferGroup.cpp b/media/libstagefright/MediaBufferGroup.cpp index c8d05f4..80aae51 100644 --- a/media/libstagefright/MediaBufferGroup.cpp +++ b/media/libstagefright/MediaBufferGroup.cpp @@ -17,9 +17,9 @@ #define LOG_TAG "MediaBufferGroup" #include <utils/Log.h> +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/MediaBuffer.h> #include <media/stagefright/MediaBufferGroup.h> -#include <media/stagefright/MediaDebug.h> namespace android { diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp new file mode 100644 index 0000000..a9e7f36 --- /dev/null +++ b/media/libstagefright/MediaCodec.cpp @@ -0,0 +1,1217 @@ +/* + * Copyright 2012, The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "MediaCodec" +#include <utils/Log.h> + +#include <media/stagefright/MediaCodec.h> + +#include "include/SoftwareRenderer.h" + +#include <gui/SurfaceTextureClient.h> +#include <media/stagefright/foundation/ABuffer.h> +#include <media/stagefright/foundation/ADebug.h> +#include <media/stagefright/foundation/AMessage.h> +#include <media/stagefright/ACodec.h> +#include <media/stagefright/MediaErrors.h> +#include <media/stagefright/MetaData.h> +#include <media/stagefright/NativeWindowWrapper.h> + +namespace android { + +// static +sp<MediaCodec> MediaCodec::CreateByType( + const sp<ALooper> &looper, const char *mime, bool encoder) { + sp<MediaCodec> codec = new MediaCodec(looper); + if (codec->init(mime, true /* nameIsType */, encoder) != OK) { + return NULL; + } + + return codec; +} + +// static +sp<MediaCodec> MediaCodec::CreateByComponentName( + const sp<ALooper> &looper, const char *name) { + sp<MediaCodec> codec = new MediaCodec(looper); + if (codec->init(name, false /* nameIsType */, false /* encoder */) != OK) { + return NULL; + } + + return codec; +} + +MediaCodec::MediaCodec(const sp<ALooper> &looper) + : mState(UNINITIALIZED), + mLooper(looper), + mCodec(new ACodec), + mFlags(0), + mSoftRenderer(NULL), + mDequeueInputTimeoutGeneration(0), + mDequeueInputReplyID(0), + mDequeueOutputTimeoutGeneration(0), + mDequeueOutputReplyID(0) { +} + +MediaCodec::~MediaCodec() { + CHECK_EQ(mState, UNINITIALIZED); +} + +// static +status_t MediaCodec::PostAndAwaitResponse( + const sp<AMessage> &msg, sp<AMessage> *response) { + status_t err = msg->postAndAwaitResponse(response); + + if (err != OK) { + return err; + } + + if (!(*response)->findInt32("err", &err)) { + err = OK; + } + + return err; +} + +status_t MediaCodec::init(const char *name, bool nameIsType, bool encoder) { + // Current video decoders do not return from OMX_FillThisBuffer + // quickly, violating the OpenMAX specs, until that is remedied + // we need to invest in 
an extra looper to free the main event + // queue. + bool needDedicatedLooper = false; + if (nameIsType && !strncasecmp(name, "video/", 6)) { + needDedicatedLooper = true; + } else if (!nameIsType && !strncmp(name, "OMX.TI.DUCATI1.VIDEO.", 21)) { + needDedicatedLooper = true; + } + + if (needDedicatedLooper) { + if (mCodecLooper == NULL) { + mCodecLooper = new ALooper; + mCodecLooper->setName("CodecLooper"); + mCodecLooper->start(false, false, ANDROID_PRIORITY_AUDIO); + } + + mCodecLooper->registerHandler(mCodec); + } else { + mLooper->registerHandler(mCodec); + } + + mLooper->registerHandler(this); + + mCodec->setNotificationMessage(new AMessage(kWhatCodecNotify, id())); + + sp<AMessage> msg = new AMessage(kWhatInit, id()); + msg->setString("name", name); + msg->setInt32("nameIsType", nameIsType); + + if (nameIsType) { + msg->setInt32("encoder", encoder); + } + + sp<AMessage> response; + return PostAndAwaitResponse(msg, &response); +} + +status_t MediaCodec::configure( + const sp<AMessage> &format, + const sp<SurfaceTextureClient> &nativeWindow, + uint32_t flags) { + sp<AMessage> msg = new AMessage(kWhatConfigure, id()); + + msg->setMessage("format", format); + msg->setInt32("flags", flags); + + if (nativeWindow != NULL) { + if (!(mFlags & kFlagIsSoftwareCodec)) { + msg->setObject( + "native-window", + new NativeWindowWrapper(nativeWindow)); + } else { + mNativeWindow = nativeWindow; + } + } + + sp<AMessage> response; + return PostAndAwaitResponse(msg, &response); +} + +status_t MediaCodec::start() { + sp<AMessage> msg = new AMessage(kWhatStart, id()); + + sp<AMessage> response; + return PostAndAwaitResponse(msg, &response); +} + +status_t MediaCodec::stop() { + sp<AMessage> msg = new AMessage(kWhatStop, id()); + + sp<AMessage> response; + return PostAndAwaitResponse(msg, &response); +} + +status_t MediaCodec::release() { + sp<AMessage> msg = new AMessage(kWhatRelease, id()); + + sp<AMessage> response; + return PostAndAwaitResponse(msg, &response); +} + +status_t 
MediaCodec::queueInputBuffer( + size_t index, + size_t offset, + size_t size, + int64_t presentationTimeUs, + uint32_t flags) { + sp<AMessage> msg = new AMessage(kWhatQueueInputBuffer, id()); + msg->setSize("index", index); + msg->setSize("offset", offset); + msg->setSize("size", size); + msg->setInt64("timeUs", presentationTimeUs); + msg->setInt32("flags", flags); + + sp<AMessage> response; + return PostAndAwaitResponse(msg, &response); +} + +status_t MediaCodec::dequeueInputBuffer(size_t *index, int64_t timeoutUs) { + sp<AMessage> msg = new AMessage(kWhatDequeueInputBuffer, id()); + msg->setInt64("timeoutUs", timeoutUs); + + sp<AMessage> response; + status_t err; + if ((err = PostAndAwaitResponse(msg, &response)) != OK) { + return err; + } + + CHECK(response->findSize("index", index)); + + return OK; +} + +status_t MediaCodec::dequeueOutputBuffer( + size_t *index, + size_t *offset, + size_t *size, + int64_t *presentationTimeUs, + uint32_t *flags, + int64_t timeoutUs) { + sp<AMessage> msg = new AMessage(kWhatDequeueOutputBuffer, id()); + msg->setInt64("timeoutUs", timeoutUs); + + sp<AMessage> response; + status_t err; + if ((err = PostAndAwaitResponse(msg, &response)) != OK) { + return err; + } + + CHECK(response->findSize("index", index)); + CHECK(response->findSize("offset", offset)); + CHECK(response->findSize("size", size)); + CHECK(response->findInt64("timeUs", presentationTimeUs)); + CHECK(response->findInt32("flags", (int32_t *)flags)); + + return OK; +} + +status_t MediaCodec::renderOutputBufferAndRelease(size_t index) { + sp<AMessage> msg = new AMessage(kWhatReleaseOutputBuffer, id()); + msg->setSize("index", index); + msg->setInt32("render", true); + + sp<AMessage> response; + return PostAndAwaitResponse(msg, &response); +} + +status_t MediaCodec::releaseOutputBuffer(size_t index) { + sp<AMessage> msg = new AMessage(kWhatReleaseOutputBuffer, id()); + msg->setSize("index", index); + + sp<AMessage> response; + return PostAndAwaitResponse(msg, &response); 
+} + +status_t MediaCodec::getOutputFormat(sp<AMessage> *format) const { + sp<AMessage> msg = new AMessage(kWhatGetOutputFormat, id()); + + sp<AMessage> response; + status_t err; + if ((err = PostAndAwaitResponse(msg, &response)) != OK) { + return err; + } + + CHECK(response->findMessage("format", format)); + + return OK; +} + +status_t MediaCodec::getInputBuffers(Vector<sp<ABuffer> > *buffers) const { + sp<AMessage> msg = new AMessage(kWhatGetBuffers, id()); + msg->setInt32("portIndex", kPortIndexInput); + msg->setPointer("buffers", buffers); + + sp<AMessage> response; + return PostAndAwaitResponse(msg, &response); +} + +status_t MediaCodec::getOutputBuffers(Vector<sp<ABuffer> > *buffers) const { + sp<AMessage> msg = new AMessage(kWhatGetBuffers, id()); + msg->setInt32("portIndex", kPortIndexOutput); + msg->setPointer("buffers", buffers); + + sp<AMessage> response; + return PostAndAwaitResponse(msg, &response); +} + +status_t MediaCodec::flush() { + sp<AMessage> msg = new AMessage(kWhatFlush, id()); + + sp<AMessage> response; + return PostAndAwaitResponse(msg, &response); +} + +//////////////////////////////////////////////////////////////////////////////// + +void MediaCodec::cancelPendingDequeueOperations() { + if (mFlags & kFlagDequeueInputPending) { + sp<AMessage> response = new AMessage; + response->setInt32("err", INVALID_OPERATION); + response->postReply(mDequeueInputReplyID); + + ++mDequeueInputTimeoutGeneration; + mDequeueInputReplyID = 0; + mFlags &= ~kFlagDequeueInputPending; + } + + if (mFlags & kFlagDequeueOutputPending) { + sp<AMessage> response = new AMessage; + response->setInt32("err", INVALID_OPERATION); + response->postReply(mDequeueOutputReplyID); + + ++mDequeueOutputTimeoutGeneration; + mDequeueOutputReplyID = 0; + mFlags &= ~kFlagDequeueOutputPending; + } +} + +bool MediaCodec::handleDequeueInputBuffer(uint32_t replyID, bool newRequest) { + if (mState != STARTED + || (mFlags & kFlagStickyError) + || (newRequest && (mFlags & 
kFlagDequeueInputPending))) { + sp<AMessage> response = new AMessage; + response->setInt32("err", INVALID_OPERATION); + + response->postReply(replyID); + + return true; + } + + ssize_t index = dequeuePortBuffer(kPortIndexInput); + + if (index < 0) { + CHECK_EQ(index, -EAGAIN); + return false; + } + + sp<AMessage> response = new AMessage; + response->setSize("index", index); + response->postReply(replyID); + + return true; +} + +bool MediaCodec::handleDequeueOutputBuffer(uint32_t replyID, bool newRequest) { + sp<AMessage> response = new AMessage; + + if (mState != STARTED + || (mFlags & kFlagStickyError) + || (newRequest && (mFlags & kFlagDequeueOutputPending))) { + response->setInt32("err", INVALID_OPERATION); + } else if (mFlags & kFlagOutputBuffersChanged) { + response->setInt32("err", INFO_OUTPUT_BUFFERS_CHANGED); + mFlags &= ~kFlagOutputBuffersChanged; + } else if (mFlags & kFlagOutputFormatChanged) { + response->setInt32("err", INFO_FORMAT_CHANGED); + mFlags &= ~kFlagOutputFormatChanged; + } else { + ssize_t index = dequeuePortBuffer(kPortIndexOutput); + + if (index < 0) { + CHECK_EQ(index, -EAGAIN); + return false; + } + + const sp<ABuffer> &buffer = + mPortBuffers[kPortIndexOutput].itemAt(index).mData; + + response->setSize("index", index); + response->setSize("offset", buffer->offset()); + response->setSize("size", buffer->size()); + + int64_t timeUs; + CHECK(buffer->meta()->findInt64("timeUs", &timeUs)); + + response->setInt64("timeUs", timeUs); + + int32_t omxFlags; + CHECK(buffer->meta()->findInt32("omxFlags", &omxFlags)); + + uint32_t flags = 0; + if (omxFlags & OMX_BUFFERFLAG_SYNCFRAME) { + flags |= BUFFER_FLAG_SYNCFRAME; + } + if (omxFlags & OMX_BUFFERFLAG_CODECCONFIG) { + flags |= BUFFER_FLAG_CODECCONFIG; + } + if (omxFlags & OMX_BUFFERFLAG_EOS) { + flags |= BUFFER_FLAG_EOS; + } + + response->setInt32("flags", flags); + } + + response->postReply(replyID); + + return true; +} + +void MediaCodec::onMessageReceived(const sp<AMessage> &msg) { + switch 
(msg->what()) { + case kWhatCodecNotify: + { + int32_t what; + CHECK(msg->findInt32("what", &what)); + + switch (what) { + case ACodec::kWhatError: + { + int32_t omxError, internalError; + CHECK(msg->findInt32("omx-error", &omxError)); + CHECK(msg->findInt32("err", &internalError)); + + ALOGE("Codec reported an error. " + "(omx error 0x%08x, internalError %d)", + omxError, internalError); + + bool sendErrorReponse = true; + + switch (mState) { + case INITIALIZING: + { + setState(UNINITIALIZED); + break; + } + + case CONFIGURING: + { + setState(INITIALIZED); + break; + } + + case STARTING: + { + setState(CONFIGURED); + break; + } + + case STOPPING: + case RELEASING: + { + // Ignore the error, assuming we'll still get + // the shutdown complete notification. + + sendErrorReponse = false; + break; + } + + case FLUSHING: + { + setState(STARTED); + break; + } + + case STARTED: + { + sendErrorReponse = false; + + mFlags |= kFlagStickyError; + + cancelPendingDequeueOperations(); + break; + } + + default: + { + sendErrorReponse = false; + + mFlags |= kFlagStickyError; + break; + } + } + + if (sendErrorReponse) { + sp<AMessage> response = new AMessage; + response->setInt32("err", UNKNOWN_ERROR); + + response->postReply(mReplyID); + } + break; + } + + case ACodec::kWhatComponentAllocated: + { + CHECK_EQ(mState, INITIALIZING); + setState(INITIALIZED); + + AString componentName; + CHECK(msg->findString("componentName", &componentName)); + + if (componentName.startsWith("OMX.google.")) { + mFlags |= kFlagIsSoftwareCodec; + } else { + mFlags &= ~kFlagIsSoftwareCodec; + } + + (new AMessage)->postReply(mReplyID); + break; + } + + case ACodec::kWhatComponentConfigured: + { + CHECK_EQ(mState, CONFIGURING); + setState(CONFIGURED); + + (new AMessage)->postReply(mReplyID); + break; + } + + case ACodec::kWhatBuffersAllocated: + { + int32_t portIndex; + CHECK(msg->findInt32("portIndex", &portIndex)); + + ALOGV("%s buffers allocated", + portIndex == kPortIndexInput ? 
"input" : "output"); + + CHECK(portIndex == kPortIndexInput + || portIndex == kPortIndexOutput); + + mPortBuffers[portIndex].clear(); + + Vector<BufferInfo> *buffers = &mPortBuffers[portIndex]; + for (size_t i = 0;; ++i) { + AString name = StringPrintf("buffer-id_%d", i); + + void *bufferID; + if (!msg->findPointer(name.c_str(), &bufferID)) { + break; + } + + name = StringPrintf("data_%d", i); + + BufferInfo info; + info.mBufferID = bufferID; + info.mOwnedByClient = false; + CHECK(msg->findBuffer(name.c_str(), &info.mData)); + + buffers->push_back(info); + } + + if (portIndex == kPortIndexOutput) { + if (mState == STARTING) { + // We're always allocating output buffers after + // allocating input buffers, so this is a good + // indication that now all buffers are allocated. + setState(STARTED); + (new AMessage)->postReply(mReplyID); + } else { + mFlags |= kFlagOutputBuffersChanged; + } + } + break; + } + + case ACodec::kWhatOutputFormatChanged: + { + ALOGV("codec output format changed"); + + if ((mFlags & kFlagIsSoftwareCodec) + && mNativeWindow != NULL) { + AString mime; + CHECK(msg->findString("mime", &mime)); + + if (!strncasecmp("video/", mime.c_str(), 6)) { + delete mSoftRenderer; + mSoftRenderer = NULL; + + int32_t width, height; + CHECK(msg->findInt32("width", &width)); + CHECK(msg->findInt32("height", &height)); + + int32_t colorFormat; + CHECK(msg->findInt32( + "color-format", &colorFormat)); + + sp<MetaData> meta = new MetaData; + meta->setInt32(kKeyWidth, width); + meta->setInt32(kKeyHeight, height); + meta->setInt32(kKeyColorFormat, colorFormat); + + mSoftRenderer = + new SoftwareRenderer(mNativeWindow, meta); + } + } + + mOutputFormat = msg; + mFlags |= kFlagOutputFormatChanged; + break; + } + + case ACodec::kWhatFillThisBuffer: + { + /* size_t index = */updateBuffers(kPortIndexInput, msg); + + if (mState == FLUSHING + || mState == STOPPING + || mState == RELEASING) { + returnBuffersToCodecOnPort(kPortIndexInput); + break; + } + + if (mFlags & 
kFlagDequeueInputPending) { + CHECK(handleDequeueInputBuffer(mDequeueInputReplyID)); + + ++mDequeueInputTimeoutGeneration; + mFlags &= ~kFlagDequeueInputPending; + mDequeueInputReplyID = 0; + } + break; + } + + case ACodec::kWhatDrainThisBuffer: + { + /* size_t index = */updateBuffers(kPortIndexOutput, msg); + + if (mState == FLUSHING + || mState == STOPPING + || mState == RELEASING) { + returnBuffersToCodecOnPort(kPortIndexOutput); + break; + } + + sp<ABuffer> buffer; + CHECK(msg->findBuffer("buffer", &buffer)); + + int32_t omxFlags; + CHECK(msg->findInt32("flags", &omxFlags)); + + buffer->meta()->setInt32("omxFlags", omxFlags); + + if (mFlags & kFlagDequeueOutputPending) { + CHECK(handleDequeueOutputBuffer(mDequeueOutputReplyID)); + + ++mDequeueOutputTimeoutGeneration; + mFlags &= ~kFlagDequeueOutputPending; + mDequeueOutputReplyID = 0; + } + break; + } + + case ACodec::kWhatEOS: + { + // We already notify the client of this by using the + // corresponding flag in "onOutputBufferReady". 
+ break; + } + + case ACodec::kWhatShutdownCompleted: + { + if (mState == STOPPING) { + setState(INITIALIZED); + } else { + CHECK_EQ(mState, RELEASING); + setState(UNINITIALIZED); + } + + (new AMessage)->postReply(mReplyID); + break; + } + + case ACodec::kWhatFlushCompleted: + { + CHECK_EQ(mState, FLUSHING); + setState(STARTED); + + mCodec->signalResume(); + + (new AMessage)->postReply(mReplyID); + break; + } + + default: + TRESPASS(); + } + break; + } + + case kWhatInit: + { + uint32_t replyID; + CHECK(msg->senderAwaitsResponse(&replyID)); + + if (mState != UNINITIALIZED) { + sp<AMessage> response = new AMessage; + response->setInt32("err", INVALID_OPERATION); + + response->postReply(replyID); + break; + } + + mReplyID = replyID; + setState(INITIALIZING); + + AString name; + CHECK(msg->findString("name", &name)); + + int32_t nameIsType; + int32_t encoder = false; + CHECK(msg->findInt32("nameIsType", &nameIsType)); + if (nameIsType) { + CHECK(msg->findInt32("encoder", &encoder)); + } + + sp<AMessage> format = new AMessage; + + if (nameIsType) { + format->setString("mime", name.c_str()); + format->setInt32("encoder", encoder); + } else { + format->setString("componentName", name.c_str()); + } + + mCodec->initiateAllocateComponent(format); + break; + } + + case kWhatConfigure: + { + uint32_t replyID; + CHECK(msg->senderAwaitsResponse(&replyID)); + + if (mState != INITIALIZED) { + sp<AMessage> response = new AMessage; + response->setInt32("err", INVALID_OPERATION); + + response->postReply(replyID); + break; + } + + mReplyID = replyID; + setState(CONFIGURING); + + sp<RefBase> obj; + if (!msg->findObject("native-window", &obj)) { + obj.clear(); + } + + sp<AMessage> format; + CHECK(msg->findMessage("format", &format)); + + if (obj != NULL) { + format->setObject("native-window", obj); + } + + uint32_t flags; + CHECK(msg->findInt32("flags", (int32_t *)&flags)); + + if (flags & CONFIGURE_FLAG_ENCODE) { + format->setInt32("encoder", true); + } + + 
mCodec->initiateConfigureComponent(format); + break; + } + + case kWhatStart: + { + uint32_t replyID; + CHECK(msg->senderAwaitsResponse(&replyID)); + + if (mState != CONFIGURED) { + sp<AMessage> response = new AMessage; + response->setInt32("err", INVALID_OPERATION); + + response->postReply(replyID); + break; + } + + mReplyID = replyID; + setState(STARTING); + + mCodec->initiateStart(); + break; + } + + case kWhatStop: + { + uint32_t replyID; + CHECK(msg->senderAwaitsResponse(&replyID)); + + if (mState != INITIALIZED + && mState != CONFIGURED && mState != STARTED) { + sp<AMessage> response = new AMessage; + response->setInt32("err", INVALID_OPERATION); + + response->postReply(replyID); + break; + } + + mReplyID = replyID; + setState(STOPPING); + + mCodec->initiateShutdown(true /* keepComponentAllocated */); + returnBuffersToCodec(); + break; + } + + case kWhatRelease: + { + uint32_t replyID; + CHECK(msg->senderAwaitsResponse(&replyID)); + + if (mState != INITIALIZED + && mState != CONFIGURED && mState != STARTED) { + sp<AMessage> response = new AMessage; + response->setInt32("err", INVALID_OPERATION); + + response->postReply(replyID); + break; + } + + mReplyID = replyID; + setState(RELEASING); + + mCodec->initiateShutdown(); + returnBuffersToCodec(); + break; + } + + case kWhatDequeueInputBuffer: + { + uint32_t replyID; + CHECK(msg->senderAwaitsResponse(&replyID)); + + if (handleDequeueInputBuffer(replyID, true /* new request */)) { + break; + } + + int64_t timeoutUs; + CHECK(msg->findInt64("timeoutUs", &timeoutUs)); + + if (timeoutUs == 0ll) { + sp<AMessage> response = new AMessage; + response->setInt32("err", -EAGAIN); + response->postReply(replyID); + break; + } + + mFlags |= kFlagDequeueInputPending; + mDequeueInputReplyID = replyID; + + if (timeoutUs > 0ll) { + sp<AMessage> timeoutMsg = + new AMessage(kWhatDequeueInputTimedOut, id()); + timeoutMsg->setInt32( + "generation", ++mDequeueInputTimeoutGeneration); + timeoutMsg->post(timeoutUs); + } + break; + } + + 
case kWhatDequeueInputTimedOut: + { + int32_t generation; + CHECK(msg->findInt32("generation", &generation)); + + if (generation != mDequeueInputTimeoutGeneration) { + // Obsolete + break; + } + + CHECK(mFlags & kFlagDequeueInputPending); + + sp<AMessage> response = new AMessage; + response->setInt32("err", -EAGAIN); + response->postReply(mDequeueInputReplyID); + + mFlags &= ~kFlagDequeueInputPending; + mDequeueInputReplyID = 0; + break; + } + + case kWhatQueueInputBuffer: + { + uint32_t replyID; + CHECK(msg->senderAwaitsResponse(&replyID)); + + if (mState != STARTED || (mFlags & kFlagStickyError)) { + sp<AMessage> response = new AMessage; + response->setInt32("err", INVALID_OPERATION); + + response->postReply(replyID); + break; + } + + status_t err = onQueueInputBuffer(msg); + + sp<AMessage> response = new AMessage; + response->setInt32("err", err); + response->postReply(replyID); + break; + } + + case kWhatDequeueOutputBuffer: + { + uint32_t replyID; + CHECK(msg->senderAwaitsResponse(&replyID)); + + if (handleDequeueOutputBuffer(replyID, true /* new request */)) { + break; + } + + int64_t timeoutUs; + CHECK(msg->findInt64("timeoutUs", &timeoutUs)); + + if (timeoutUs == 0ll) { + sp<AMessage> response = new AMessage; + response->setInt32("err", -EAGAIN); + response->postReply(replyID); + break; + } + + mFlags |= kFlagDequeueOutputPending; + mDequeueOutputReplyID = replyID; + + if (timeoutUs > 0ll) { + sp<AMessage> timeoutMsg = + new AMessage(kWhatDequeueOutputTimedOut, id()); + timeoutMsg->setInt32( + "generation", ++mDequeueOutputTimeoutGeneration); + timeoutMsg->post(timeoutUs); + } + break; + } + + case kWhatDequeueOutputTimedOut: + { + int32_t generation; + CHECK(msg->findInt32("generation", &generation)); + + if (generation != mDequeueOutputTimeoutGeneration) { + // Obsolete + break; + } + + CHECK(mFlags & kFlagDequeueOutputPending); + + sp<AMessage> response = new AMessage; + response->setInt32("err", -EAGAIN); + response->postReply(mDequeueOutputReplyID); + 
+ mFlags &= ~kFlagDequeueOutputPending; + mDequeueOutputReplyID = 0; + break; + } + + case kWhatReleaseOutputBuffer: + { + uint32_t replyID; + CHECK(msg->senderAwaitsResponse(&replyID)); + + if (mState != STARTED || (mFlags & kFlagStickyError)) { + sp<AMessage> response = new AMessage; + response->setInt32("err", INVALID_OPERATION); + + response->postReply(replyID); + break; + } + + status_t err = onReleaseOutputBuffer(msg); + + sp<AMessage> response = new AMessage; + response->setInt32("err", err); + response->postReply(replyID); + break; + } + + case kWhatGetBuffers: + { + uint32_t replyID; + CHECK(msg->senderAwaitsResponse(&replyID)); + + if (mState != STARTED || (mFlags & kFlagStickyError)) { + sp<AMessage> response = new AMessage; + response->setInt32("err", INVALID_OPERATION); + + response->postReply(replyID); + break; + } + + int32_t portIndex; + CHECK(msg->findInt32("portIndex", &portIndex)); + + Vector<sp<ABuffer> > *dstBuffers; + CHECK(msg->findPointer("buffers", (void **)&dstBuffers)); + + dstBuffers->clear(); + const Vector<BufferInfo> &srcBuffers = mPortBuffers[portIndex]; + + for (size_t i = 0; i < srcBuffers.size(); ++i) { + const BufferInfo &info = srcBuffers.itemAt(i); + + dstBuffers->push_back(info.mData); + } + + (new AMessage)->postReply(replyID); + break; + } + + case kWhatFlush: + { + uint32_t replyID; + CHECK(msg->senderAwaitsResponse(&replyID)); + + if (mState != STARTED || (mFlags & kFlagStickyError)) { + sp<AMessage> response = new AMessage; + response->setInt32("err", INVALID_OPERATION); + + response->postReply(replyID); + break; + } + + mReplyID = replyID; + setState(FLUSHING); + + mCodec->signalFlush(); + returnBuffersToCodec(); + break; + } + + case kWhatGetOutputFormat: + { + uint32_t replyID; + CHECK(msg->senderAwaitsResponse(&replyID)); + + if ((mState != STARTED && mState != FLUSHING) + || (mFlags & kFlagStickyError)) { + sp<AMessage> response = new AMessage; + response->setInt32("err", INVALID_OPERATION); + + 
response->postReply(replyID); + break; + } + + sp<AMessage> response = new AMessage; + response->setMessage("format", mOutputFormat); + response->postReply(replyID); + break; + } + + default: + TRESPASS(); + } +} + +void MediaCodec::setState(State newState) { + if (newState == UNINITIALIZED) { + delete mSoftRenderer; + mSoftRenderer = NULL; + + mNativeWindow.clear(); + + mOutputFormat.clear(); + mFlags &= ~kFlagOutputFormatChanged; + mFlags &= ~kFlagOutputBuffersChanged; + mFlags &= ~kFlagStickyError; + } + + mState = newState; + + cancelPendingDequeueOperations(); +} + +void MediaCodec::returnBuffersToCodec() { + returnBuffersToCodecOnPort(kPortIndexInput); + returnBuffersToCodecOnPort(kPortIndexOutput); +} + +void MediaCodec::returnBuffersToCodecOnPort(int32_t portIndex) { + CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput); + + Vector<BufferInfo> *buffers = &mPortBuffers[portIndex]; + + for (size_t i = 0; i < buffers->size(); ++i) { + BufferInfo *info = &buffers->editItemAt(i); + + if (info->mNotify != NULL) { + sp<AMessage> msg = info->mNotify; + info->mNotify = NULL; + info->mOwnedByClient = false; + + if (portIndex == kPortIndexInput) { + msg->setInt32("err", ERROR_END_OF_STREAM); + } + msg->post(); + } + } + + mAvailPortBuffers[portIndex].clear(); +} + +size_t MediaCodec::updateBuffers( + int32_t portIndex, const sp<AMessage> &msg) { + CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput); + + void *bufferID; + CHECK(msg->findPointer("buffer-id", &bufferID)); + + Vector<BufferInfo> *buffers = &mPortBuffers[portIndex]; + + for (size_t i = 0; i < buffers->size(); ++i) { + BufferInfo *info = &buffers->editItemAt(i); + + if (info->mBufferID == bufferID) { + CHECK(info->mNotify == NULL); + CHECK(msg->findMessage("reply", &info->mNotify)); + + mAvailPortBuffers[portIndex].push_back(i); + + return i; + } + } + + TRESPASS(); + + return 0; +} + +status_t MediaCodec::onQueueInputBuffer(const sp<AMessage> &msg) { + size_t index; + 
size_t offset; + size_t size; + int64_t timeUs; + uint32_t flags; + CHECK(msg->findSize("index", &index)); + CHECK(msg->findSize("offset", &offset)); + CHECK(msg->findSize("size", &size)); + CHECK(msg->findInt64("timeUs", &timeUs)); + CHECK(msg->findInt32("flags", (int32_t *)&flags)); + + if (index >= mPortBuffers[kPortIndexInput].size()) { + return -ERANGE; + } + + BufferInfo *info = &mPortBuffers[kPortIndexInput].editItemAt(index); + + if (info->mNotify == NULL || !info->mOwnedByClient) { + return -EACCES; + } + + if (offset + size > info->mData->capacity()) { + return -EINVAL; + } + + sp<AMessage> reply = info->mNotify; + info->mNotify = NULL; + info->mOwnedByClient = false; + + info->mData->setRange(offset, size); + info->mData->meta()->setInt64("timeUs", timeUs); + + if (flags & BUFFER_FLAG_EOS) { + info->mData->meta()->setInt32("eos", true); + } + + if (flags & BUFFER_FLAG_CODECCONFIG) { + info->mData->meta()->setInt32("csd", true); + } + + reply->setBuffer("buffer", info->mData); + reply->post(); + + return OK; +} + +status_t MediaCodec::onReleaseOutputBuffer(const sp<AMessage> &msg) { + size_t index; + CHECK(msg->findSize("index", &index)); + + int32_t render; + if (!msg->findInt32("render", &render)) { + render = 0; + } + + if (mState != STARTED) { + return -EINVAL; + } + + if (index >= mPortBuffers[kPortIndexOutput].size()) { + return -ERANGE; + } + + BufferInfo *info = &mPortBuffers[kPortIndexOutput].editItemAt(index); + + if (info->mNotify == NULL || !info->mOwnedByClient) { + return -EACCES; + } + + if (render) { + info->mNotify->setInt32("render", true); + + if (mSoftRenderer != NULL) { + mSoftRenderer->render( + info->mData->data(), info->mData->size(), NULL); + } + } + + info->mNotify->post(); + info->mNotify = NULL; + info->mOwnedByClient = false; + + return OK; +} + +ssize_t MediaCodec::dequeuePortBuffer(int32_t portIndex) { + CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput); + + List<size_t> *availBuffers = 
&mAvailPortBuffers[portIndex]; + + if (availBuffers->empty()) { + return -EAGAIN; + } + + size_t index = *availBuffers->begin(); + availBuffers->erase(availBuffers->begin()); + + BufferInfo *info = &mPortBuffers[portIndex].editItemAt(index); + CHECK(!info->mOwnedByClient); + info->mOwnedByClient = true; + + return index; +} + +} // namespace android diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp new file mode 100644 index 0000000..6b64e21 --- /dev/null +++ b/media/libstagefright/MediaCodecList.cpp @@ -0,0 +1,475 @@ +/* + * Copyright 2012, The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "MediaCodecList" +#include <utils/Log.h> + +#include <media/stagefright/MediaCodecList.h> + +#include <media/stagefright/foundation/ADebug.h> +#include <media/stagefright/MediaErrors.h> +#include <utils/threads.h> + +#include <expat.h> + +namespace android { + +static Mutex sInitMutex; + +// static +MediaCodecList *MediaCodecList::sCodecList; + +// static +const MediaCodecList *MediaCodecList::getInstance() { + Mutex::Autolock autoLock(sInitMutex); + + if (sCodecList == NULL) { + sCodecList = new MediaCodecList; + } + + return sCodecList->initCheck() == OK ? 
sCodecList : NULL; +} + +MediaCodecList::MediaCodecList() + : mInitCheck(NO_INIT) { + FILE *file = fopen("/etc/media_codecs.xml", "r"); + + if (file == NULL) { + ALOGW("unable to open media codecs configuration xml file."); + return; + } + + parseXMLFile(file); + + if (mInitCheck == OK) { + // These are currently still used by the video editing suite. + + addMediaCodec(true /* encoder */, "AACEncoder", "audio/mp4a-latm"); + addMediaCodec(true /* encoder */, "AVCEncoder", "video/avc"); + + addMediaCodec(true /* encoder */, "M4vH263Encoder"); + addType("video/3gpp"); + addType("video/mp4v-es"); + } + +#if 0 + for (size_t i = 0; i < mCodecInfos.size(); ++i) { + const CodecInfo &info = mCodecInfos.itemAt(i); + + AString line = info.mName; + line.append(" supports "); + for (size_t j = 0; j < mTypes.size(); ++j) { + uint32_t value = mTypes.valueAt(j); + + if (info.mTypes & (1ul << value)) { + line.append(mTypes.keyAt(j)); + line.append(" "); + } + } + + ALOGI("%s", line.c_str()); + } +#endif + + fclose(file); + file = NULL; +} + +MediaCodecList::~MediaCodecList() { +} + +status_t MediaCodecList::initCheck() const { + return mInitCheck; +} + +void MediaCodecList::parseXMLFile(FILE *file) { + mInitCheck = OK; + mCurrentSection = SECTION_TOPLEVEL; + mDepth = 0; + + XML_Parser parser = ::XML_ParserCreate(NULL); + CHECK(parser != NULL); + + ::XML_SetUserData(parser, this); + ::XML_SetElementHandler( + parser, StartElementHandlerWrapper, EndElementHandlerWrapper); + + const int BUFF_SIZE = 512; + while (mInitCheck == OK) { + void *buff = ::XML_GetBuffer(parser, BUFF_SIZE); + if (buff == NULL) { + ALOGE("failed to in call to XML_GetBuffer()"); + mInitCheck = UNKNOWN_ERROR; + break; + } + + int bytes_read = ::fread(buff, 1, BUFF_SIZE, file); + if (bytes_read < 0) { + ALOGE("failed in call to read"); + mInitCheck = ERROR_IO; + break; + } + + if (::XML_ParseBuffer(parser, bytes_read, bytes_read == 0) + != XML_STATUS_OK) { + mInitCheck = ERROR_MALFORMED; + break; + } + + if 
(bytes_read == 0) { + break; + } + } + + ::XML_ParserFree(parser); + + if (mInitCheck == OK) { + for (size_t i = mCodecInfos.size(); i-- > 0;) { + CodecInfo *info = &mCodecInfos.editItemAt(i); + + if (info->mTypes == 0) { + // No types supported by this component??? + + ALOGW("Component %s does not support any type of media?", + info->mName.c_str()); + + mCodecInfos.removeAt(i); + } + } + } + + if (mInitCheck != OK) { + mCodecInfos.clear(); + mCodecQuirks.clear(); + } +} + +// static +void MediaCodecList::StartElementHandlerWrapper( + void *me, const char *name, const char **attrs) { + static_cast<MediaCodecList *>(me)->startElementHandler(name, attrs); +} + +// static +void MediaCodecList::EndElementHandlerWrapper(void *me, const char *name) { + static_cast<MediaCodecList *>(me)->endElementHandler(name); +} + +void MediaCodecList::startElementHandler( + const char *name, const char **attrs) { + if (mInitCheck != OK) { + return; + } + + switch (mCurrentSection) { + case SECTION_TOPLEVEL: + { + if (!strcmp(name, "Decoders")) { + mCurrentSection = SECTION_DECODERS; + } else if (!strcmp(name, "Encoders")) { + mCurrentSection = SECTION_ENCODERS; + } + break; + } + + case SECTION_DECODERS: + { + if (!strcmp(name, "MediaCodec")) { + mInitCheck = + addMediaCodecFromAttributes(false /* encoder */, attrs); + + mCurrentSection = SECTION_DECODER; + } + break; + } + + case SECTION_ENCODERS: + { + if (!strcmp(name, "MediaCodec")) { + mInitCheck = + addMediaCodecFromAttributes(true /* encoder */, attrs); + + mCurrentSection = SECTION_ENCODER; + } + break; + } + + case SECTION_DECODER: + case SECTION_ENCODER: + { + if (!strcmp(name, "Quirk")) { + mInitCheck = addQuirk(attrs); + } else if (!strcmp(name, "Type")) { + mInitCheck = addTypeFromAttributes(attrs); + } + break; + } + + default: + break; + } + + ++mDepth; +} + +void MediaCodecList::endElementHandler(const char *name) { + if (mInitCheck != OK) { + return; + } + + switch (mCurrentSection) { + case SECTION_DECODERS: + { + if 
(!strcmp(name, "Decoders")) { + mCurrentSection = SECTION_TOPLEVEL; + } + break; + } + + case SECTION_ENCODERS: + { + if (!strcmp(name, "Encoders")) { + mCurrentSection = SECTION_TOPLEVEL; + } + break; + } + + case SECTION_DECODER: + { + if (!strcmp(name, "MediaCodec")) { + mCurrentSection = SECTION_DECODERS; + } + break; + } + + case SECTION_ENCODER: + { + if (!strcmp(name, "MediaCodec")) { + mCurrentSection = SECTION_ENCODERS; + } + break; + } + + default: + break; + } + + --mDepth; +} + +status_t MediaCodecList::addMediaCodecFromAttributes( + bool encoder, const char **attrs) { + const char *name = NULL; + const char *type = NULL; + + size_t i = 0; + while (attrs[i] != NULL) { + if (!strcmp(attrs[i], "name")) { + if (attrs[i + 1] == NULL) { + return -EINVAL; + } + name = attrs[i + 1]; + ++i; + } else if (!strcmp(attrs[i], "type")) { + if (attrs[i + 1] == NULL) { + return -EINVAL; + } + type = attrs[i + 1]; + ++i; + } else { + return -EINVAL; + } + + ++i; + } + + if (name == NULL) { + return -EINVAL; + } + + addMediaCodec(encoder, name, type); + + return OK; +} + +void MediaCodecList::addMediaCodec( + bool encoder, const char *name, const char *type) { + mCodecInfos.push(); + CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1); + info->mName = name; + info->mIsEncoder = encoder; + info->mTypes = 0; + info->mQuirks = 0; + + if (type != NULL) { + addType(type); + } +} + +status_t MediaCodecList::addQuirk(const char **attrs) { + const char *name = NULL; + + size_t i = 0; + while (attrs[i] != NULL) { + if (!strcmp(attrs[i], "name")) { + if (attrs[i + 1] == NULL) { + return -EINVAL; + } + name = attrs[i + 1]; + ++i; + } else { + return -EINVAL; + } + + ++i; + } + + if (name == NULL) { + return -EINVAL; + } + + uint32_t bit; + ssize_t index = mCodecQuirks.indexOfKey(name); + if (index < 0) { + bit = mCodecQuirks.size(); + + if (bit == 32) { + ALOGW("Too many distinct quirk names in configuration."); + return OK; + } + + mCodecQuirks.add(name, bit); + } 
else { + bit = mCodecQuirks.valueAt(index); + } + + CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1); + info->mQuirks |= 1ul << bit; + + return OK; +} + +status_t MediaCodecList::addTypeFromAttributes(const char **attrs) { + const char *name = NULL; + + size_t i = 0; + while (attrs[i] != NULL) { + if (!strcmp(attrs[i], "name")) { + if (attrs[i + 1] == NULL) { + return -EINVAL; + } + name = attrs[i + 1]; + ++i; + } else { + return -EINVAL; + } + + ++i; + } + + if (name == NULL) { + return -EINVAL; + } + + addType(name); + + return OK; +} + +void MediaCodecList::addType(const char *name) { + uint32_t bit; + ssize_t index = mTypes.indexOfKey(name); + if (index < 0) { + bit = mTypes.size(); + + if (bit == 32) { + ALOGW("Too many distinct type names in configuration."); + return; + } + + mTypes.add(name, bit); + } else { + bit = mTypes.valueAt(index); + } + + CodecInfo *info = &mCodecInfos.editItemAt(mCodecInfos.size() - 1); + info->mTypes |= 1ul << bit; +} + +ssize_t MediaCodecList::findCodecByType( + const char *type, bool encoder, size_t startIndex) const { + ssize_t typeIndex = mTypes.indexOfKey(type); + + if (typeIndex < 0) { + return -ENOENT; + } + + uint32_t typeMask = 1ul << mTypes.valueAt(typeIndex); + + while (startIndex < mCodecInfos.size()) { + const CodecInfo &info = mCodecInfos.itemAt(startIndex); + + if (info.mIsEncoder == encoder && (info.mTypes & typeMask)) { + return startIndex; + } + + ++startIndex; + } + + return -ENOENT; +} + +ssize_t MediaCodecList::findCodecByName(const char *name) const { + for (size_t i = 0; i < mCodecInfos.size(); ++i) { + const CodecInfo &info = mCodecInfos.itemAt(i); + + if (info.mName == name) { + return i; + } + } + + return -ENOENT; +} + +const char *MediaCodecList::getCodecName(size_t index) const { + if (index >= mCodecInfos.size()) { + return NULL; + } + + const CodecInfo &info = mCodecInfos.itemAt(index); + return info.mName.c_str(); +} + +bool MediaCodecList::codecHasQuirk( + size_t index, const char 
*quirkName) const { + if (index >= mCodecInfos.size()) { + return NULL; + } + + const CodecInfo &info = mCodecInfos.itemAt(index); + + if (info.mQuirks != 0) { + ssize_t index = mCodecQuirks.indexOfKey(quirkName); + if (index >= 0 && info.mQuirks & (1ul << mCodecQuirks.valueAt(index))) { + return true; + } + } + + return false; +} + +} // namespace android diff --git a/media/libstagefright/MediaExtractor.cpp b/media/libstagefright/MediaExtractor.cpp index 7b17d65..2171492 100644 --- a/media/libstagefright/MediaExtractor.cpp +++ b/media/libstagefright/MediaExtractor.cpp @@ -19,6 +19,7 @@ #include <utils/Log.h> #include "include/AMRExtractor.h" +#include "include/AVIExtractor.h" #include "include/MP3Extractor.h" #include "include/MPEG4Extractor.h" #include "include/WAVExtractor.h" @@ -109,10 +110,12 @@ sp<MediaExtractor> MediaExtractor::Create( ret = new MatroskaExtractor(source); } else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG2TS)) { ret = new MPEG2TSExtractor(source); + } else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_AVI)) { + ret = new AVIExtractor(source); } else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_WVM)) { ret = new WVMExtractor(source); } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC_ADTS)) { - ret = new AACExtractor(source); + ret = new AACExtractor(source, meta); } else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG2PS)) { ret = new MPEG2PSExtractor(source); } diff --git a/media/libstagefright/MediaSourceSplitter.cpp b/media/libstagefright/MediaSourceSplitter.cpp deleted file mode 100644 index 8af0694..0000000 --- a/media/libstagefright/MediaSourceSplitter.cpp +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Copyright (C) 2010 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -//#define LOG_NDEBUG 0 -#define LOG_TAG "MediaSourceSplitter" -#include <utils/Log.h> - -#include <media/stagefright/MediaSourceSplitter.h> -#include <media/stagefright/MediaDebug.h> -#include <media/stagefright/MediaBuffer.h> -#include <media/stagefright/MetaData.h> - -namespace android { - -MediaSourceSplitter::MediaSourceSplitter(sp<MediaSource> mediaSource) { - mNumberOfClients = 0; - mSource = mediaSource; - mSourceStarted = false; - - mNumberOfClientsStarted = 0; - mNumberOfCurrentReads = 0; - mCurrentReadBit = 0; - mLastReadCompleted = true; -} - -MediaSourceSplitter::~MediaSourceSplitter() { -} - -sp<MediaSource> MediaSourceSplitter::createClient() { - Mutex::Autolock autoLock(mLock); - - sp<MediaSource> client = new Client(this, mNumberOfClients++); - mClientsStarted.push(false); - mClientsDesiredReadBit.push(0); - return client; -} - -status_t MediaSourceSplitter::start(int clientId, MetaData *params) { - Mutex::Autolock autoLock(mLock); - - ALOGV("start client (%d)", clientId); - if (mClientsStarted[clientId]) { - return OK; - } - - mNumberOfClientsStarted++; - - if (!mSourceStarted) { - ALOGV("Starting real source from client (%d)", clientId); - status_t err = mSource->start(params); - - if (err == OK) { - mSourceStarted = true; - mClientsStarted.editItemAt(clientId) = true; - mClientsDesiredReadBit.editItemAt(clientId) = !mCurrentReadBit; - } - - return err; - } else { - mClientsStarted.editItemAt(clientId) = true; - if (mLastReadCompleted) { - // Last read was completed. So join in the threads for the next read. 
- mClientsDesiredReadBit.editItemAt(clientId) = !mCurrentReadBit; - } else { - // Last read is ongoing. So join in the threads for the current read. - mClientsDesiredReadBit.editItemAt(clientId) = mCurrentReadBit; - } - return OK; - } -} - -status_t MediaSourceSplitter::stop(int clientId) { - Mutex::Autolock autoLock(mLock); - - ALOGV("stop client (%d)", clientId); - CHECK(clientId >= 0 && clientId < mNumberOfClients); - CHECK(mClientsStarted[clientId]); - - if (--mNumberOfClientsStarted == 0) { - ALOGV("Stopping real source from client (%d)", clientId); - status_t err = mSource->stop(); - mSourceStarted = false; - mClientsStarted.editItemAt(clientId) = false; - return err; - } else { - mClientsStarted.editItemAt(clientId) = false; - if (!mLastReadCompleted && (mClientsDesiredReadBit[clientId] == mCurrentReadBit)) { - // !mLastReadCompleted implies that buffer has been read from source, but all - // clients haven't read it. - // mClientsDesiredReadBit[clientId] == mCurrentReadBit implies that this - // client would have wanted to read from this buffer. (i.e. it has not yet - // called read() for the current read buffer.) - // Since other threads may be waiting for all the clients' reads to complete, - // signal that this read has been aborted. - signalReadComplete_lock(true); - } - return OK; - } -} - -sp<MetaData> MediaSourceSplitter::getFormat(int clientId) { - Mutex::Autolock autoLock(mLock); - - ALOGV("getFormat client (%d)", clientId); - return mSource->getFormat(); -} - -status_t MediaSourceSplitter::read(int clientId, - MediaBuffer **buffer, const MediaSource::ReadOptions *options) { - Mutex::Autolock autoLock(mLock); - - CHECK(clientId >= 0 && clientId < mNumberOfClients); - - ALOGV("read client (%d)", clientId); - *buffer = NULL; - - if (!mClientsStarted[clientId]) { - return OK; - } - - if (mCurrentReadBit != mClientsDesiredReadBit[clientId]) { - // Desired buffer has not been read from source yet. 
- - // If the current client is the special client with clientId = 0 - // then read from source, else wait until the client 0 has finished - // reading from source. - if (clientId == 0) { - // Wait for all client's last read to complete first so as to not - // corrupt the buffer at mLastReadMediaBuffer. - waitForAllClientsLastRead_lock(clientId); - - readFromSource_lock(options); - *buffer = mLastReadMediaBuffer; - } else { - waitForReadFromSource_lock(clientId); - - *buffer = mLastReadMediaBuffer; - (*buffer)->add_ref(); - } - CHECK(mCurrentReadBit == mClientsDesiredReadBit[clientId]); - } else { - // Desired buffer has already been read from source. Use the cached data. - CHECK(clientId != 0); - - *buffer = mLastReadMediaBuffer; - (*buffer)->add_ref(); - } - - mClientsDesiredReadBit.editItemAt(clientId) = !mClientsDesiredReadBit[clientId]; - signalReadComplete_lock(false); - - return mLastReadStatus; -} - -void MediaSourceSplitter::readFromSource_lock(const MediaSource::ReadOptions *options) { - mLastReadStatus = mSource->read(&mLastReadMediaBuffer , options); - - mCurrentReadBit = !mCurrentReadBit; - mLastReadCompleted = false; - mReadFromSourceCondition.broadcast(); -} - -void MediaSourceSplitter::waitForReadFromSource_lock(int32_t clientId) { - mReadFromSourceCondition.wait(mLock); -} - -void MediaSourceSplitter::waitForAllClientsLastRead_lock(int32_t clientId) { - if (mLastReadCompleted) { - return; - } - mAllReadsCompleteCondition.wait(mLock); - CHECK(mLastReadCompleted); -} - -void MediaSourceSplitter::signalReadComplete_lock(bool readAborted) { - if (!readAborted) { - mNumberOfCurrentReads++; - } - - if (mNumberOfCurrentReads == mNumberOfClientsStarted) { - mLastReadCompleted = true; - mNumberOfCurrentReads = 0; - mAllReadsCompleteCondition.broadcast(); - } -} - -status_t MediaSourceSplitter::pause(int clientId) { - return ERROR_UNSUPPORTED; -} - -// Client - -MediaSourceSplitter::Client::Client( - sp<MediaSourceSplitter> splitter, - int32_t clientId) { - 
mSplitter = splitter; - mClientId = clientId; -} - -status_t MediaSourceSplitter::Client::start(MetaData *params) { - return mSplitter->start(mClientId, params); -} - -status_t MediaSourceSplitter::Client::stop() { - return mSplitter->stop(mClientId); -} - -sp<MetaData> MediaSourceSplitter::Client::getFormat() { - return mSplitter->getFormat(mClientId); -} - -status_t MediaSourceSplitter::Client::read( - MediaBuffer **buffer, const ReadOptions *options) { - return mSplitter->read(mClientId, buffer, options); -} - -status_t MediaSourceSplitter::Client::pause() { - return mSplitter->pause(mClientId); -} - -} // namespace android diff --git a/media/libstagefright/MetaData.cpp b/media/libstagefright/MetaData.cpp index 884f3b4..66dec90 100644 --- a/media/libstagefright/MetaData.cpp +++ b/media/libstagefright/MetaData.cpp @@ -17,7 +17,7 @@ #include <stdlib.h> #include <string.h> -#include <media/stagefright/MediaDebug.h> +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/MetaData.h> namespace android { diff --git a/media/libstagefright/NuCachedSource2.cpp b/media/libstagefright/NuCachedSource2.cpp index 249c298..0957426 100644 --- a/media/libstagefright/NuCachedSource2.cpp +++ b/media/libstagefright/NuCachedSource2.cpp @@ -370,6 +370,7 @@ void NuCachedSource2::onFetch() { && (mSource->flags() & DataSource::kIsHTTPBasedSource)) { ALOGV("Disconnecting at high watermark"); static_cast<HTTPBase *>(mSource.get())->disconnect(); + mFinalStatus = -EAGAIN; } } } else { @@ -549,7 +550,7 @@ ssize_t NuCachedSource2::readInternal(off64_t offset, void *data, size_t size) { size_t delta = offset - mCacheOffset; - if (mFinalStatus != OK) { + if (mFinalStatus != OK && mNumRetriesLeft == 0) { if (delta >= mCache->totalSize()) { return mFinalStatus; } @@ -591,7 +592,7 @@ status_t NuCachedSource2::seekInternal_l(off64_t offset) { size_t totalSize = mCache->totalSize(); CHECK_EQ(mCache->releaseFromStart(totalSize), totalSize); - mFinalStatus = OK; + 
mNumRetriesLeft = kMaxNumRetries; mFetching = true; return OK; @@ -603,8 +604,8 @@ void NuCachedSource2::resumeFetchingIfNecessary() { restartPrefetcherIfNecessary_l(true /* ignore low water threshold */); } -sp<DecryptHandle> NuCachedSource2::DrmInitialization() { - return mSource->DrmInitialization(); +sp<DecryptHandle> NuCachedSource2::DrmInitialization(const char* mime) { + return mSource->DrmInitialization(mime); } void NuCachedSource2::getDrmInfo(sp<DecryptHandle> &handle, DrmManagerClient **client) { diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp new file mode 100644 index 0000000..afd4763 --- /dev/null +++ b/media/libstagefright/NuMediaExtractor.cpp @@ -0,0 +1,433 @@ +/* + * Copyright 2012, The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "NuMediaExtractor" +#include <utils/Log.h> + +#include <media/stagefright/NuMediaExtractor.h> + +#include "include/ESDS.h" + +#include <media/stagefright/foundation/ABuffer.h> +#include <media/stagefright/foundation/ADebug.h> +#include <media/stagefright/foundation/AMessage.h> +#include <media/stagefright/DataSource.h> +#include <media/stagefright/MediaBuffer.h> +#include <media/stagefright/MediaDefs.h> +#include <media/stagefright/MediaErrors.h> +#include <media/stagefright/MediaExtractor.h> +#include <media/stagefright/MediaSource.h> +#include <media/stagefright/MetaData.h> +#include <media/stagefright/Utils.h> + +namespace android { + +NuMediaExtractor::NuMediaExtractor() { +} + +NuMediaExtractor::~NuMediaExtractor() { + releaseTrackSamples(); + + for (size_t i = 0; i < mSelectedTracks.size(); ++i) { + TrackInfo *info = &mSelectedTracks.editItemAt(i); + + CHECK_EQ((status_t)OK, info->mSource->stop()); + } + + mSelectedTracks.clear(); +} + +status_t NuMediaExtractor::setDataSource(const char *path) { + sp<DataSource> dataSource = DataSource::CreateFromURI(path); + + if (dataSource == NULL) { + return -ENOENT; + } + + mImpl = MediaExtractor::Create(dataSource); + + if (mImpl == NULL) { + return ERROR_UNSUPPORTED; + } + + return OK; +} + +size_t NuMediaExtractor::countTracks() const { + return mImpl == NULL ? 
0 : mImpl->countTracks(); +} + +status_t NuMediaExtractor::getTrackFormat( + size_t index, sp<AMessage> *format) const { + *format = NULL; + + if (mImpl == NULL) { + return -EINVAL; + } + + if (index >= mImpl->countTracks()) { + return -ERANGE; + } + + sp<MetaData> meta = mImpl->getTrackMetaData(index); + + const char *mime; + CHECK(meta->findCString(kKeyMIMEType, &mime)); + + sp<AMessage> msg = new AMessage; + msg->setString("mime", mime); + + if (!strncasecmp("video/", mime, 6)) { + int32_t width, height; + CHECK(meta->findInt32(kKeyWidth, &width)); + CHECK(meta->findInt32(kKeyHeight, &height)); + + msg->setInt32("width", width); + msg->setInt32("height", height); + } else { + CHECK(!strncasecmp("audio/", mime, 6)); + + int32_t numChannels, sampleRate; + CHECK(meta->findInt32(kKeyChannelCount, &numChannels)); + CHECK(meta->findInt32(kKeySampleRate, &sampleRate)); + + msg->setInt32("channel-count", numChannels); + msg->setInt32("sample-rate", sampleRate); + } + + int32_t maxInputSize; + if (meta->findInt32(kKeyMaxInputSize, &maxInputSize)) { + msg->setInt32("max-input-size", maxInputSize); + } + + uint32_t type; + const void *data; + size_t size; + if (meta->findData(kKeyAVCC, &type, &data, &size)) { + // Parse the AVCDecoderConfigurationRecord + + const uint8_t *ptr = (const uint8_t *)data; + + CHECK(size >= 7); + CHECK_EQ((unsigned)ptr[0], 1u); // configurationVersion == 1 + uint8_t profile = ptr[1]; + uint8_t level = ptr[3]; + + // There is decodable content out there that fails the following + // assertion, let's be lenient for now... + // CHECK((ptr[4] >> 2) == 0x3f); // reserved + + size_t lengthSize = 1 + (ptr[4] & 3); + + // commented out check below as H264_QVGA_500_NO_AUDIO.3gp + // violates it... 
+ // CHECK((ptr[5] >> 5) == 7); // reserved + + size_t numSeqParameterSets = ptr[5] & 31; + + ptr += 6; + size -= 6; + + sp<ABuffer> buffer = new ABuffer(1024); + buffer->setRange(0, 0); + + for (size_t i = 0; i < numSeqParameterSets; ++i) { + CHECK(size >= 2); + size_t length = U16_AT(ptr); + + ptr += 2; + size -= 2; + + CHECK(size >= length); + + memcpy(buffer->data() + buffer->size(), "\x00\x00\x00\x01", 4); + memcpy(buffer->data() + buffer->size() + 4, ptr, length); + buffer->setRange(0, buffer->size() + 4 + length); + + ptr += length; + size -= length; + } + + buffer->meta()->setInt32("csd", true); + buffer->meta()->setInt64("timeUs", 0); + + msg->setBuffer("csd-0", buffer); + + buffer = new ABuffer(1024); + buffer->setRange(0, 0); + + CHECK(size >= 1); + size_t numPictureParameterSets = *ptr; + ++ptr; + --size; + + for (size_t i = 0; i < numPictureParameterSets; ++i) { + CHECK(size >= 2); + size_t length = U16_AT(ptr); + + ptr += 2; + size -= 2; + + CHECK(size >= length); + + memcpy(buffer->data() + buffer->size(), "\x00\x00\x00\x01", 4); + memcpy(buffer->data() + buffer->size() + 4, ptr, length); + buffer->setRange(0, buffer->size() + 4 + length); + + ptr += length; + size -= length; + } + + buffer->meta()->setInt32("csd", true); + buffer->meta()->setInt64("timeUs", 0); + msg->setBuffer("csd-1", buffer); + } else if (meta->findData(kKeyESDS, &type, &data, &size)) { + ESDS esds((const char *)data, size); + CHECK_EQ(esds.InitCheck(), (status_t)OK); + + const void *codec_specific_data; + size_t codec_specific_data_size; + esds.getCodecSpecificInfo( + &codec_specific_data, &codec_specific_data_size); + + sp<ABuffer> buffer = new ABuffer(codec_specific_data_size); + + memcpy(buffer->data(), codec_specific_data, + codec_specific_data_size); + + buffer->meta()->setInt32("csd", true); + buffer->meta()->setInt64("timeUs", 0); + msg->setBuffer("csd-0", buffer); + } else if (meta->findData(kKeyVorbisInfo, &type, &data, &size)) { + sp<ABuffer> buffer = new 
ABuffer(size); + memcpy(buffer->data(), data, size); + + buffer->meta()->setInt32("csd", true); + buffer->meta()->setInt64("timeUs", 0); + msg->setBuffer("csd-0", buffer); + + if (!meta->findData(kKeyVorbisBooks, &type, &data, &size)) { + return -EINVAL; + } + + buffer = new ABuffer(size); + memcpy(buffer->data(), data, size); + + buffer->meta()->setInt32("csd", true); + buffer->meta()->setInt64("timeUs", 0); + msg->setBuffer("csd-1", buffer); + } + + *format = msg; + + return OK; +} + +status_t NuMediaExtractor::selectTrack(size_t index) { + if (mImpl == NULL) { + return -EINVAL; + } + + if (index >= mImpl->countTracks()) { + return -ERANGE; + } + + for (size_t i = 0; i < mSelectedTracks.size(); ++i) { + TrackInfo *info = &mSelectedTracks.editItemAt(i); + + if (info->mTrackIndex == index) { + // This track has already been selected. + return OK; + } + } + + sp<MediaSource> source = mImpl->getTrack(index); + + CHECK_EQ((status_t)OK, source->start()); + + mSelectedTracks.push(); + TrackInfo *info = &mSelectedTracks.editItemAt(mSelectedTracks.size() - 1); + + info->mSource = source; + info->mTrackIndex = index; + info->mFinalResult = OK; + info->mSample = NULL; + info->mSampleTimeUs = -1ll; + info->mFlags = 0; + + const char *mime; + CHECK(source->getFormat()->findCString(kKeyMIMEType, &mime)); + + if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_VORBIS)) { + info->mFlags |= kIsVorbis; + } + + return OK; +} + +void NuMediaExtractor::releaseTrackSamples() { + for (size_t i = 0; i < mSelectedTracks.size(); ++i) { + TrackInfo *info = &mSelectedTracks.editItemAt(i); + + if (info->mSample != NULL) { + info->mSample->release(); + info->mSample = NULL; + + info->mSampleTimeUs = -1ll; + } + } +} + +ssize_t NuMediaExtractor::fetchTrackSamples(int64_t seekTimeUs) { + TrackInfo *minInfo = NULL; + ssize_t minIndex = -1; + + for (size_t i = 0; i < mSelectedTracks.size(); ++i) { + TrackInfo *info = &mSelectedTracks.editItemAt(i); + + if (seekTimeUs >= 0ll) { + info->mFinalResult = OK; + 
+ if (info->mSample != NULL) { + info->mSample->release(); + info->mSample = NULL; + info->mSampleTimeUs = -1ll; + } + } else if (info->mFinalResult != OK) { + continue; + } + + if (info->mSample == NULL) { + MediaSource::ReadOptions options; + if (seekTimeUs >= 0ll) { + options.setSeekTo(seekTimeUs); + } + status_t err = info->mSource->read(&info->mSample, &options); + + if (err != OK) { + CHECK(info->mSample == NULL); + + info->mFinalResult = err; + info->mSampleTimeUs = -1ll; + continue; + } else { + CHECK(info->mSample != NULL); + CHECK(info->mSample->meta_data()->findInt64( + kKeyTime, &info->mSampleTimeUs)); + } + } + + if (minInfo == NULL || info->mSampleTimeUs < minInfo->mSampleTimeUs) { + minInfo = info; + minIndex = i; + } + } + + return minIndex; +} + +status_t NuMediaExtractor::seekTo(int64_t timeUs) { + return fetchTrackSamples(timeUs); +} + +status_t NuMediaExtractor::advance() { + ssize_t minIndex = fetchTrackSamples(); + + if (minIndex < 0) { + return ERROR_END_OF_STREAM; + } + + TrackInfo *info = &mSelectedTracks.editItemAt(minIndex); + + info->mSample->release(); + info->mSample = NULL; + info->mSampleTimeUs = -1ll; + + return OK; +} + +status_t NuMediaExtractor::readSampleData(const sp<ABuffer> &buffer) { + ssize_t minIndex = fetchTrackSamples(); + + if (minIndex < 0) { + return ERROR_END_OF_STREAM; + } + + TrackInfo *info = &mSelectedTracks.editItemAt(minIndex); + + size_t sampleSize = info->mSample->range_length(); + + if (info->mFlags & kIsVorbis) { + // Each sample's data is suffixed by the number of page samples + // or -1 if not available. 
+ sampleSize += sizeof(int32_t); + } + + if (buffer->capacity() < sampleSize) { + return -ENOMEM; + } + + const uint8_t *src = + (const uint8_t *)info->mSample->data() + + info->mSample->range_offset(); + + memcpy((uint8_t *)buffer->data(), src, info->mSample->range_length()); + + if (info->mFlags & kIsVorbis) { + int32_t numPageSamples; + if (!info->mSample->meta_data()->findInt32( + kKeyValidSamples, &numPageSamples)) { + numPageSamples = -1; + } + + memcpy((uint8_t *)buffer->data() + info->mSample->range_length(), + &numPageSamples, + sizeof(numPageSamples)); + } + + buffer->setRange(0, sampleSize); + + return OK; +} + +status_t NuMediaExtractor::getSampleTrackIndex(size_t *trackIndex) { + ssize_t minIndex = fetchTrackSamples(); + + if (minIndex < 0) { + return ERROR_END_OF_STREAM; + } + + TrackInfo *info = &mSelectedTracks.editItemAt(minIndex); + *trackIndex = info->mTrackIndex; + + return OK; +} + +status_t NuMediaExtractor::getSampleTime(int64_t *sampleTimeUs) { + ssize_t minIndex = fetchTrackSamples(); + + if (minIndex < 0) { + return ERROR_END_OF_STREAM; + } + + TrackInfo *info = &mSelectedTracks.editItemAt(minIndex); + *sampleTimeUs = info->mSampleTimeUs; + + return OK; +} + +} // namespace android diff --git a/media/libstagefright/OMXClient.cpp b/media/libstagefright/OMXClient.cpp index 9de873e..7cdb793 100644 --- a/media/libstagefright/OMXClient.cpp +++ b/media/libstagefright/OMXClient.cpp @@ -20,11 +20,299 @@ #include <binder/IServiceManager.h> #include <media/IMediaPlayerService.h> -#include <media/stagefright/MediaDebug.h> +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/OMXClient.h> +#include <utils/KeyedVector.h> + +#include "include/OMX.h" namespace android { +struct MuxOMX : public IOMX { + MuxOMX(const sp<IOMX> &remoteOMX); + virtual ~MuxOMX(); + + virtual IBinder *onAsBinder() { return NULL; } + + virtual bool livesLocally(node_id node, pid_t pid); + + virtual status_t listNodes(List<ComponentInfo> *list); + + 
virtual status_t allocateNode( + const char *name, const sp<IOMXObserver> &observer, + node_id *node); + + virtual status_t freeNode(node_id node); + + virtual status_t sendCommand( + node_id node, OMX_COMMANDTYPE cmd, OMX_S32 param); + + virtual status_t getParameter( + node_id node, OMX_INDEXTYPE index, + void *params, size_t size); + + virtual status_t setParameter( + node_id node, OMX_INDEXTYPE index, + const void *params, size_t size); + + virtual status_t getConfig( + node_id node, OMX_INDEXTYPE index, + void *params, size_t size); + + virtual status_t setConfig( + node_id node, OMX_INDEXTYPE index, + const void *params, size_t size); + + virtual status_t getState( + node_id node, OMX_STATETYPE* state); + + virtual status_t storeMetaDataInBuffers( + node_id node, OMX_U32 port_index, OMX_BOOL enable); + + virtual status_t enableGraphicBuffers( + node_id node, OMX_U32 port_index, OMX_BOOL enable); + + virtual status_t getGraphicBufferUsage( + node_id node, OMX_U32 port_index, OMX_U32* usage); + + virtual status_t useBuffer( + node_id node, OMX_U32 port_index, const sp<IMemory> ¶ms, + buffer_id *buffer); + + virtual status_t useGraphicBuffer( + node_id node, OMX_U32 port_index, + const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer); + + virtual status_t allocateBuffer( + node_id node, OMX_U32 port_index, size_t size, + buffer_id *buffer, void **buffer_data); + + virtual status_t allocateBufferWithBackup( + node_id node, OMX_U32 port_index, const sp<IMemory> ¶ms, + buffer_id *buffer); + + virtual status_t freeBuffer( + node_id node, OMX_U32 port_index, buffer_id buffer); + + virtual status_t fillBuffer(node_id node, buffer_id buffer); + + virtual status_t emptyBuffer( + node_id node, + buffer_id buffer, + OMX_U32 range_offset, OMX_U32 range_length, + OMX_U32 flags, OMX_TICKS timestamp); + + virtual status_t getExtensionIndex( + node_id node, + const char *parameter_name, + OMX_INDEXTYPE *index); + +private: + mutable Mutex mLock; + + sp<IOMX> mRemoteOMX; + 
sp<IOMX> mLocalOMX; + + KeyedVector<node_id, bool> mIsLocalNode; + + bool isLocalNode(node_id node) const; + bool isLocalNode_l(node_id node) const; + const sp<IOMX> &getOMX(node_id node) const; + const sp<IOMX> &getOMX_l(node_id node) const; + + static bool IsSoftwareComponent(const char *name); + + DISALLOW_EVIL_CONSTRUCTORS(MuxOMX); +}; + +MuxOMX::MuxOMX(const sp<IOMX> &remoteOMX) + : mRemoteOMX(remoteOMX) { +} + +MuxOMX::~MuxOMX() { +} + +bool MuxOMX::isLocalNode(node_id node) const { + Mutex::Autolock autoLock(mLock); + + return isLocalNode_l(node); +} + +bool MuxOMX::isLocalNode_l(node_id node) const { + return mIsLocalNode.indexOfKey(node) >= 0; +} + +// static +bool MuxOMX::IsSoftwareComponent(const char *name) { + return !strncasecmp(name, "OMX.google.", 11); +} + +const sp<IOMX> &MuxOMX::getOMX(node_id node) const { + return isLocalNode(node) ? mLocalOMX : mRemoteOMX; +} + +const sp<IOMX> &MuxOMX::getOMX_l(node_id node) const { + return isLocalNode_l(node) ? mLocalOMX : mRemoteOMX; +} + +bool MuxOMX::livesLocally(node_id node, pid_t pid) { + return getOMX(node)->livesLocally(node, pid); +} + +status_t MuxOMX::listNodes(List<ComponentInfo> *list) { + Mutex::Autolock autoLock(mLock); + + if (mLocalOMX == NULL) { + mLocalOMX = new OMX; + } + + return mLocalOMX->listNodes(list); +} + +status_t MuxOMX::allocateNode( + const char *name, const sp<IOMXObserver> &observer, + node_id *node) { + Mutex::Autolock autoLock(mLock); + + sp<IOMX> omx; + + if (IsSoftwareComponent(name)) { + if (mLocalOMX == NULL) { + mLocalOMX = new OMX; + } + omx = mLocalOMX; + } else { + omx = mRemoteOMX; + } + + status_t err = omx->allocateNode(name, observer, node); + + if (err != OK) { + return err; + } + + if (omx == mLocalOMX) { + mIsLocalNode.add(*node, true); + } + + return OK; +} + +status_t MuxOMX::freeNode(node_id node) { + Mutex::Autolock autoLock(mLock); + + status_t err = getOMX_l(node)->freeNode(node); + + if (err != OK) { + return err; + } + + 
mIsLocalNode.removeItem(node); + + return OK; +} + +status_t MuxOMX::sendCommand( + node_id node, OMX_COMMANDTYPE cmd, OMX_S32 param) { + return getOMX(node)->sendCommand(node, cmd, param); +} + +status_t MuxOMX::getParameter( + node_id node, OMX_INDEXTYPE index, + void *params, size_t size) { + return getOMX(node)->getParameter(node, index, params, size); +} + +status_t MuxOMX::setParameter( + node_id node, OMX_INDEXTYPE index, + const void *params, size_t size) { + return getOMX(node)->setParameter(node, index, params, size); +} + +status_t MuxOMX::getConfig( + node_id node, OMX_INDEXTYPE index, + void *params, size_t size) { + return getOMX(node)->getConfig(node, index, params, size); +} + +status_t MuxOMX::setConfig( + node_id node, OMX_INDEXTYPE index, + const void *params, size_t size) { + return getOMX(node)->setConfig(node, index, params, size); +} + +status_t MuxOMX::getState( + node_id node, OMX_STATETYPE* state) { + return getOMX(node)->getState(node, state); +} + +status_t MuxOMX::storeMetaDataInBuffers( + node_id node, OMX_U32 port_index, OMX_BOOL enable) { + return getOMX(node)->storeMetaDataInBuffers(node, port_index, enable); +} + +status_t MuxOMX::enableGraphicBuffers( + node_id node, OMX_U32 port_index, OMX_BOOL enable) { + return getOMX(node)->enableGraphicBuffers(node, port_index, enable); +} + +status_t MuxOMX::getGraphicBufferUsage( + node_id node, OMX_U32 port_index, OMX_U32* usage) { + return getOMX(node)->getGraphicBufferUsage(node, port_index, usage); +} + +status_t MuxOMX::useBuffer( + node_id node, OMX_U32 port_index, const sp<IMemory> ¶ms, + buffer_id *buffer) { + return getOMX(node)->useBuffer(node, port_index, params, buffer); +} + +status_t MuxOMX::useGraphicBuffer( + node_id node, OMX_U32 port_index, + const sp<GraphicBuffer> &graphicBuffer, buffer_id *buffer) { + return getOMX(node)->useGraphicBuffer( + node, port_index, graphicBuffer, buffer); +} + +status_t MuxOMX::allocateBuffer( + node_id node, OMX_U32 port_index, size_t size, 
+ buffer_id *buffer, void **buffer_data) { + return getOMX(node)->allocateBuffer( + node, port_index, size, buffer, buffer_data); +} + +status_t MuxOMX::allocateBufferWithBackup( + node_id node, OMX_U32 port_index, const sp<IMemory> ¶ms, + buffer_id *buffer) { + return getOMX(node)->allocateBufferWithBackup( + node, port_index, params, buffer); +} + +status_t MuxOMX::freeBuffer( + node_id node, OMX_U32 port_index, buffer_id buffer) { + return getOMX(node)->freeBuffer(node, port_index, buffer); +} + +status_t MuxOMX::fillBuffer(node_id node, buffer_id buffer) { + return getOMX(node)->fillBuffer(node, buffer); +} + +status_t MuxOMX::emptyBuffer( + node_id node, + buffer_id buffer, + OMX_U32 range_offset, OMX_U32 range_length, + OMX_U32 flags, OMX_TICKS timestamp) { + return getOMX(node)->emptyBuffer( + node, buffer, range_offset, range_length, flags, timestamp); +} + +status_t MuxOMX::getExtensionIndex( + node_id node, + const char *parameter_name, + OMX_INDEXTYPE *index) { + return getOMX(node)->getExtensionIndex(node, parameter_name, index); +} + OMXClient::OMXClient() { } @@ -38,10 +326,19 @@ status_t OMXClient::connect() { mOMX = service->getOMX(); CHECK(mOMX.get() != NULL); + if (!mOMX->livesLocally(NULL /* node */, getpid())) { + ALOGI("Using client-side OMX mux."); + mOMX = new MuxOMX(mOMX); + } + return OK; } void OMXClient::disconnect() { + if (mOMX.get() != NULL) { + mOMX.clear(); + mOMX = NULL; + } } } // namespace android diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp index 60d9bb7..966416e 100755 --- a/media/libstagefright/OMXCodec.cpp +++ b/media/libstagefright/OMXCodec.cpp @@ -19,8 +19,6 @@ #include <utils/Log.h> #include "include/AACEncoder.h" -#include "include/AMRNBEncoder.h" -#include "include/AMRWBEncoder.h" #include "include/AVCEncoder.h" #include "include/M4vH263Encoder.h" @@ -35,6 +33,7 @@ #include <media/stagefright/MediaBuffer.h> #include <media/stagefright/MediaBufferGroup.h> #include 
<media/stagefright/MediaDefs.h> +#include <media/stagefright/MediaCodecList.h> #include <media/stagefright/MediaExtractor.h> #include <media/stagefright/MetaData.h> #include <media/stagefright/OMXCodec.h> @@ -59,11 +58,6 @@ const static int64_t kBufferFilledEventTimeOutNs = 3000000000LL; // component in question is buggy or not. const static uint32_t kMaxColorFormatSupported = 1000; -struct CodecInfo { - const char *mime; - const char *codec; -}; - #define FACTORY_CREATE_ENCODER(name) \ static sp<MediaSource> Make##name(const sp<MediaSource> &source, const sp<MetaData> &meta) { \ return new name(source, meta); \ @@ -71,8 +65,6 @@ static sp<MediaSource> Make##name(const sp<MediaSource> &source, const sp<MetaDa #define FACTORY_REF(name) { #name, Make##name }, -FACTORY_CREATE_ENCODER(AMRNBEncoder) -FACTORY_CREATE_ENCODER(AMRWBEncoder) FACTORY_CREATE_ENCODER(AACEncoder) FACTORY_CREATE_ENCODER(AVCEncoder) FACTORY_CREATE_ENCODER(M4vH263Encoder) @@ -86,8 +78,6 @@ static sp<MediaSource> InstantiateSoftwareEncoder( }; static const FactoryInfo kFactoryInfo[] = { - FACTORY_REF(AMRNBEncoder) - FACTORY_REF(AMRWBEncoder) FACTORY_REF(AACEncoder) FACTORY_REF(AVCEncoder) FACTORY_REF(M4vH263Encoder) @@ -102,82 +92,8 @@ static sp<MediaSource> InstantiateSoftwareEncoder( return NULL; } +#undef FACTORY_CREATE_ENCODER #undef FACTORY_REF -#undef FACTORY_CREATE - -static const CodecInfo kDecoderInfo[] = { - { MEDIA_MIMETYPE_IMAGE_JPEG, "OMX.TI.JPEG.decode" }, -// { MEDIA_MIMETYPE_AUDIO_MPEG, "OMX.TI.MP3.decode" }, - { MEDIA_MIMETYPE_AUDIO_MPEG, "OMX.google.mp3.decoder" }, - { MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II, "OMX.Nvidia.mp2.decoder" }, -// { MEDIA_MIMETYPE_AUDIO_AMR_NB, "OMX.TI.AMR.decode" }, -// { MEDIA_MIMETYPE_AUDIO_AMR_NB, "OMX.Nvidia.amr.decoder" }, - { MEDIA_MIMETYPE_AUDIO_AMR_NB, "OMX.google.amrnb.decoder" }, -// { MEDIA_MIMETYPE_AUDIO_AMR_NB, "OMX.Nvidia.amrwb.decoder" }, - { MEDIA_MIMETYPE_AUDIO_AMR_WB, "OMX.TI.WBAMR.decode" }, - { MEDIA_MIMETYPE_AUDIO_AMR_WB, 
"OMX.google.amrwb.decoder" }, -// { MEDIA_MIMETYPE_AUDIO_AAC, "OMX.Nvidia.aac.decoder" }, - { MEDIA_MIMETYPE_AUDIO_AAC, "OMX.TI.AAC.decode" }, - { MEDIA_MIMETYPE_AUDIO_AAC, "OMX.google.aac.decoder" }, - { MEDIA_MIMETYPE_AUDIO_G711_ALAW, "OMX.google.g711.alaw.decoder" }, - { MEDIA_MIMETYPE_AUDIO_G711_MLAW, "OMX.google.g711.mlaw.decoder" }, - { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.TI.DUCATI1.VIDEO.DECODER" }, - { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.Nvidia.mp4.decode" }, - { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.qcom.7x30.video.decoder.mpeg4" }, - { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.qcom.video.decoder.mpeg4" }, - { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.TI.Video.Decoder" }, - { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.SEC.MPEG4.Decoder" }, - { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.google.mpeg4.decoder" }, - { MEDIA_MIMETYPE_VIDEO_H263, "OMX.TI.DUCATI1.VIDEO.DECODER" }, - { MEDIA_MIMETYPE_VIDEO_H263, "OMX.Nvidia.h263.decode" }, - { MEDIA_MIMETYPE_VIDEO_H263, "OMX.qcom.7x30.video.decoder.h263" }, - { MEDIA_MIMETYPE_VIDEO_H263, "OMX.qcom.video.decoder.h263" }, - { MEDIA_MIMETYPE_VIDEO_H263, "OMX.SEC.H263.Decoder" }, - { MEDIA_MIMETYPE_VIDEO_H263, "OMX.google.h263.decoder" }, - { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.TI.DUCATI1.VIDEO.DECODER" }, - { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.Nvidia.h264.decode" }, - { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.qcom.7x30.video.decoder.avc" }, - { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.qcom.video.decoder.avc" }, - { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.TI.Video.Decoder" }, - { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.SEC.AVC.Decoder" }, - { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.google.h264.decoder" }, - { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.google.avc.decoder" }, - { MEDIA_MIMETYPE_AUDIO_VORBIS, "OMX.google.vorbis.decoder" }, - { MEDIA_MIMETYPE_VIDEO_VPX, "OMX.google.vpx.decoder" }, - { MEDIA_MIMETYPE_VIDEO_MPEG2, "OMX.Nvidia.mpeg2v.decode" }, -}; - -static const CodecInfo kEncoderInfo[] = { - { MEDIA_MIMETYPE_AUDIO_AMR_NB, "OMX.TI.AMR.encode" }, - { MEDIA_MIMETYPE_AUDIO_AMR_NB, "AMRNBEncoder" }, - { 
MEDIA_MIMETYPE_AUDIO_AMR_WB, "OMX.TI.WBAMR.encode" }, - { MEDIA_MIMETYPE_AUDIO_AMR_WB, "AMRWBEncoder" }, - { MEDIA_MIMETYPE_AUDIO_AAC, "OMX.TI.AAC.encode" }, - { MEDIA_MIMETYPE_AUDIO_AAC, "AACEncoder" }, - { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.TI.DUCATI1.VIDEO.MPEG4E" }, - { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.qcom.7x30.video.encoder.mpeg4" }, - { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.qcom.video.encoder.mpeg4" }, - { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.TI.Video.encoder" }, - { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.Nvidia.mp4.encoder" }, - { MEDIA_MIMETYPE_VIDEO_MPEG4, "OMX.SEC.MPEG4.Encoder" }, - { MEDIA_MIMETYPE_VIDEO_MPEG4, "M4vH263Encoder" }, - { MEDIA_MIMETYPE_VIDEO_H263, "OMX.TI.DUCATI1.VIDEO.MPEG4E" }, - { MEDIA_MIMETYPE_VIDEO_H263, "OMX.qcom.7x30.video.encoder.h263" }, - { MEDIA_MIMETYPE_VIDEO_H263, "OMX.qcom.video.encoder.h263" }, - { MEDIA_MIMETYPE_VIDEO_H263, "OMX.TI.Video.encoder" }, - { MEDIA_MIMETYPE_VIDEO_H263, "OMX.Nvidia.h263.encoder" }, - { MEDIA_MIMETYPE_VIDEO_H263, "OMX.SEC.H263.Encoder" }, - { MEDIA_MIMETYPE_VIDEO_H263, "M4vH263Encoder" }, - { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.TI.DUCATI1.VIDEO.H264E" }, - { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.qcom.7x30.video.encoder.avc" }, - { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.qcom.video.encoder.avc" }, - { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.TI.Video.encoder" }, - { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.Nvidia.h264.encoder" }, - { MEDIA_MIMETYPE_VIDEO_AVC, "OMX.SEC.AVC.Encoder" }, - { MEDIA_MIMETYPE_VIDEO_AVC, "AVCEncoder" }, -}; - -#undef OPTIONAL #define CODEC_LOGI(x, ...) ALOGI("[%s] "x, mComponentName, ##__VA_ARGS__) #define CODEC_LOGV(x, ...) 
ALOGV("[%s] "x, mComponentName, ##__VA_ARGS__) @@ -212,22 +128,6 @@ private: OMXCodecObserver &operator=(const OMXCodecObserver &); }; -static const char *GetCodec(const CodecInfo *info, size_t numInfos, - const char *mime, int index) { - CHECK(index >= 0); - for(size_t i = 0; i < numInfos; ++i) { - if (!strcasecmp(mime, info[i].mime)) { - if (index == 0) { - return info[i].codec; - } - - --index; - } - } - - return NULL; -} - template<class T> static void InitOMXParams(T *params) { params->nSize = sizeof(T); @@ -283,119 +183,36 @@ static int CompareSoftwareCodecsFirst( } // static -uint32_t OMXCodec::getComponentQuirks( - const char *componentName, bool isEncoder) { - uint32_t quirks = 0; - - if (!strcmp(componentName, "OMX.Nvidia.amr.decoder") || - !strcmp(componentName, "OMX.Nvidia.amrwb.decoder") || - !strcmp(componentName, "OMX.Nvidia.aac.decoder") || - !strcmp(componentName, "OMX.Nvidia.mp3.decoder")) { - quirks |= kDecoderLiesAboutNumberOfChannels; - } - - if (!strcmp(componentName, "OMX.TI.MP3.decode")) { - quirks |= kNeedsFlushBeforeDisable; - quirks |= kDecoderLiesAboutNumberOfChannels; - } - if (!strcmp(componentName, "OMX.TI.AAC.decode")) { - quirks |= kNeedsFlushBeforeDisable; - quirks |= kRequiresFlushCompleteEmulation; - quirks |= kSupportsMultipleFramesPerInputBuffer; - } - if (!strncmp(componentName, "OMX.qcom.video.encoder.", 23)) { - quirks |= kRequiresLoadedToIdleAfterAllocation; - quirks |= kRequiresAllocateBufferOnInputPorts; - quirks |= kRequiresAllocateBufferOnOutputPorts; - if (!strncmp(componentName, "OMX.qcom.video.encoder.avc", 26)) { - - // The AVC encoder advertises the size of output buffers - // based on the input video resolution and assumes - // the worst/least compression ratio is 0.5. It is found that - // sometimes, the output buffer size is larger than - // size advertised by the encoder. 
- quirks |= kRequiresLargerEncoderOutputBuffer; - } - } - if (!strncmp(componentName, "OMX.qcom.7x30.video.encoder.", 28)) { - } - if (!strncmp(componentName, "OMX.qcom.video.decoder.", 23)) { - quirks |= kRequiresAllocateBufferOnOutputPorts; - quirks |= kDefersOutputBufferAllocation; - } - if (!strncmp(componentName, "OMX.qcom.7x30.video.decoder.", 28)) { - quirks |= kRequiresAllocateBufferOnInputPorts; - quirks |= kRequiresAllocateBufferOnOutputPorts; - quirks |= kDefersOutputBufferAllocation; - } - - if (!strcmp(componentName, "OMX.TI.DUCATI1.VIDEO.DECODER")) { - quirks |= kRequiresAllocateBufferOnInputPorts; - quirks |= kRequiresAllocateBufferOnOutputPorts; - } - - // FIXME: - // Remove the quirks after the work is done. - else if (!strcmp(componentName, "OMX.TI.DUCATI1.VIDEO.MPEG4E") || - !strcmp(componentName, "OMX.TI.DUCATI1.VIDEO.H264E")) { - - quirks |= kRequiresAllocateBufferOnInputPorts; - quirks |= kRequiresAllocateBufferOnOutputPorts; - } - else if (!strncmp(componentName, "OMX.TI.", 7)) { - // Apparently I must not use OMX_UseBuffer on either input or - // output ports on any of the TI components or quote: - // "(I) may have unexpected problem (sic) which can be timing related - // and hard to reproduce." - - quirks |= kRequiresAllocateBufferOnInputPorts; - quirks |= kRequiresAllocateBufferOnOutputPorts; - if (!strncmp(componentName, "OMX.TI.Video.encoder", 20)) { - quirks |= kAvoidMemcopyInputRecordingFrames; - } - } - - if (!strcmp(componentName, "OMX.TI.Video.Decoder")) { - quirks |= kInputBufferSizesAreBogus; - } - - if (!strncmp(componentName, "OMX.SEC.", 8) && !isEncoder) { - // These output buffers contain no video data, just some - // opaque information that allows the overlay to display their - // contents. 
- quirks |= kOutputBuffersAreUnreadable; - } - - return quirks; -} - -// static void OMXCodec::findMatchingCodecs( const char *mime, bool createEncoder, const char *matchComponentName, uint32_t flags, - Vector<String8> *matchingCodecs) { + Vector<String8> *matchingCodecs, + Vector<uint32_t> *matchingCodecQuirks) { matchingCodecs->clear(); - for (int index = 0;; ++index) { - const char *componentName; + if (matchingCodecQuirks) { + matchingCodecQuirks->clear(); + } - if (createEncoder) { - componentName = GetCodec( - kEncoderInfo, - sizeof(kEncoderInfo) / sizeof(kEncoderInfo[0]), - mime, index); - } else { - componentName = GetCodec( - kDecoderInfo, - sizeof(kDecoderInfo) / sizeof(kDecoderInfo[0]), - mime, index); - } + const MediaCodecList *list = MediaCodecList::getInstance(); + if (list == NULL) { + return; + } - if (!componentName) { + size_t index = 0; + for (;;) { + ssize_t matchIndex = + list->findCodecByType(mime, createEncoder, index); + + if (matchIndex < 0) { break; } + index = matchIndex + 1; + + const char *componentName = list->getCodecName(matchIndex); + // If a specific codec is requested, skip the non-matching ones. 
if (matchComponentName && strcmp(componentName, matchComponentName)) { continue; @@ -410,6 +227,10 @@ void OMXCodec::findMatchingCodecs( (!(flags & (kSoftwareCodecsOnly | kHardwareCodecsOnly)))) { matchingCodecs->push(String8(componentName)); + + if (matchingCodecQuirks) { + matchingCodecQuirks->push(getComponentQuirks(list, matchIndex)); + } } } @@ -419,6 +240,45 @@ void OMXCodec::findMatchingCodecs( } // static +uint32_t OMXCodec::getComponentQuirks( + const MediaCodecList *list, size_t index) { + uint32_t quirks = 0; + if (list->codecHasQuirk( + index, "requires-allocate-on-input-ports")) { + quirks |= kRequiresAllocateBufferOnInputPorts; + } + if (list->codecHasQuirk( + index, "requires-allocate-on-output-ports")) { + quirks |= kRequiresAllocateBufferOnOutputPorts; + } + if (list->codecHasQuirk( + index, "output-buffers-are-unreadable")) { + quirks |= kOutputBuffersAreUnreadable; + } + + return quirks; +} + +// static +bool OMXCodec::findCodecQuirks(const char *componentName, uint32_t *quirks) { + const MediaCodecList *list = MediaCodecList::getInstance(); + + if (list == NULL) { + return false; + } + + ssize_t index = list->findCodecByName(componentName); + + if (index < 0) { + return false; + } + + *quirks = getComponentQuirks(list, index); + + return true; +} + +// static sp<MediaSource> OMXCodec::Create( const sp<IOMX> &omx, const sp<MetaData> &meta, bool createEncoder, @@ -440,8 +300,10 @@ sp<MediaSource> OMXCodec::Create( CHECK(success); Vector<String8> matchingCodecs; + Vector<uint32_t> matchingCodecQuirks; findMatchingCodecs( - mime, createEncoder, matchComponentName, flags, &matchingCodecs); + mime, createEncoder, matchComponentName, flags, + &matchingCodecs, &matchingCodecQuirks); if (matchingCodecs.isEmpty()) { return NULL; @@ -452,6 +314,7 @@ sp<MediaSource> OMXCodec::Create( for (size_t i = 0; i < matchingCodecs.size(); ++i) { const char *componentNameBase = matchingCodecs[i].string(); + uint32_t quirks = matchingCodecQuirks[i]; const char 
*componentName = componentNameBase; AString tmp; @@ -475,8 +338,6 @@ sp<MediaSource> OMXCodec::Create( ALOGV("Attempting to allocate OMX node '%s'", componentName); - uint32_t quirks = getComponentQuirks(componentNameBase, createEncoder); - if (!createEncoder && (quirks & kOutputBuffersAreUnreadable) && (flags & kClientNeedsFramebuffer)) { @@ -632,16 +493,6 @@ status_t OMXCodec::configureCodec(const sp<MetaData> &meta) { CODEC_LOGI( "AVC profile = %u (%s), level = %u", profile, AVCProfileToString(profile), level); - - if (!strcmp(mComponentName, "OMX.TI.Video.Decoder") - && (profile != kAVCProfileBaseline || level > 30)) { - // This stream exceeds the decoder's capabilities. The decoder - // does not handle this gracefully and would clobber the heap - // and wreak havoc instead... - - ALOGE("Profile and/or level exceed the decoder's capabilities."); - return ERROR_UNSUPPORTED; - } } else if (meta->findData(kKeyVorbisInfo, &type, &data, &size)) { addCodecSpecificData(data, size); @@ -697,40 +548,11 @@ status_t OMXCodec::configureCodec(const sp<MetaData> &meta) { } } - if (!strcasecmp(mMIME, MEDIA_MIMETYPE_IMAGE_JPEG) - && !strcmp(mComponentName, "OMX.TI.JPEG.decode")) { - OMX_COLOR_FORMATTYPE format = - OMX_COLOR_Format32bitARGB8888; - // OMX_COLOR_FormatYUV420PackedPlanar; - // OMX_COLOR_FormatCbYCrY; - // OMX_COLOR_FormatYUV411Planar; - - int32_t width, height; - bool success = meta->findInt32(kKeyWidth, &width); - success = success && meta->findInt32(kKeyHeight, &height); - - int32_t compressedSize; - success = success && meta->findInt32( - kKeyMaxInputSize, &compressedSize); - - CHECK(success); - CHECK(compressedSize > 0); - - setImageOutputFormat(format, width, height); - setJPEGInputFormat(width, height, (OMX_U32)compressedSize); - } - int32_t maxInputSize; if (meta->findInt32(kKeyMaxInputSize, &maxInputSize)) { setMinBufferSize(kPortIndexInput, (OMX_U32)maxInputSize); } - if (!strcmp(mComponentName, "OMX.TI.AMR.encode") - || !strcmp(mComponentName, 
"OMX.TI.WBAMR.encode") - || !strcmp(mComponentName, "OMX.TI.AAC.encode")) { - setMinBufferSize(kPortIndexOutput, 8192); // XXX - } - initOutputFormat(meta); if ((mFlags & kClientNeedsFramebuffer) @@ -834,21 +656,6 @@ status_t OMXCodec::setVideoPortFormatType( index, format.eCompressionFormat, format.eColorFormat); #endif - if (!strcmp("OMX.TI.Video.encoder", mComponentName)) { - if (portIndex == kPortIndexInput - && colorFormat == format.eColorFormat) { - // eCompressionFormat does not seem right. - found = true; - break; - } - if (portIndex == kPortIndexOutput - && compressionFormat == format.eCompressionFormat) { - // eColorFormat does not seem right. - found = true; - break; - } - } - if (format.eCompressionFormat == compressionFormat && format.eColorFormat == colorFormat) { found = true; @@ -911,13 +718,8 @@ status_t OMXCodec::findTargetColorFormat( int32_t targetColorFormat; if (meta->findInt32(kKeyColorFormat, &targetColorFormat)) { *colorFormat = (OMX_COLOR_FORMATTYPE) targetColorFormat; - } else { - if (!strcasecmp("OMX.TI.Video.encoder", mComponentName)) { - *colorFormat = OMX_COLOR_FormatYCbYCr; - } } - // Check whether the target color format is supported. 
return isColorFormatSupported(*colorFormat, kPortIndexInput); } @@ -1482,11 +1284,12 @@ OMXCodec::OMXCodec( const sp<MediaSource> &source, const sp<ANativeWindow> &nativeWindow) : mOMX(omx), - mOMXLivesLocally(omx->livesLocally(getpid())), + mOMXLivesLocally(omx->livesLocally(node, getpid())), mNode(node), mQuirks(quirks), mFlags(flags), mIsEncoder(isEncoder), + mIsVideo(!strncasecmp("video/", mime, 6)), mMIME(strdup(mime)), mComponentName(strdup(componentName)), mSource(source), @@ -1545,6 +1348,8 @@ void OMXCodec::setComponentRole( "video_decoder.mpeg4", "video_encoder.mpeg4" }, { MEDIA_MIMETYPE_VIDEO_H263, "video_decoder.h263", "video_encoder.h263" }, + { MEDIA_MIMETYPE_VIDEO_VPX, + "video_decoder.vpx", "video_encoder.vpx" }, }; static const size_t kNumMimeToRole = @@ -2191,8 +1996,8 @@ error: } } -int64_t OMXCodec::retrieveDecodingTimeUs(bool isCodecSpecific) { - CHECK(mIsEncoder); +int64_t OMXCodec::getDecodingTimeUs() { + CHECK(mIsEncoder && mIsVideo); if (mDecodingTimeList.empty()) { CHECK(mSignalledEOS || mNoMoreOutputData); @@ -2203,12 +2008,7 @@ int64_t OMXCodec::retrieveDecodingTimeUs(bool isCodecSpecific) { List<int64_t>::iterator it = mDecodingTimeList.begin(); int64_t timeUs = *it; - - // If the output buffer is codec specific configuration, - // do not remove the decoding time from the list. - if (!isCodecSpecific) { - mDecodingTimeList.erase(it); - } + mDecodingTimeList.erase(it); return timeUs; } @@ -2387,8 +2187,8 @@ void OMXCodec::on_message(const omx_message &msg) { mNoMoreOutputData = true; } - if (mIsEncoder) { - int64_t decodingTimeUs = retrieveDecodingTimeUs(isCodecSpecific); + if (mIsEncoder && mIsVideo) { + int64_t decodingTimeUs = isCodecSpecific? 
0: getDecodingTimeUs(); buffer->meta_data()->setInt64(kKeyDecodingTime, decodingTimeUs); } @@ -3249,7 +3049,7 @@ bool OMXCodec::drainInputBuffer(BufferInfo *info) { int64_t lastBufferTimeUs; CHECK(srcBuffer->meta_data()->findInt64(kKeyTime, &lastBufferTimeUs)); CHECK(lastBufferTimeUs >= 0); - if (mIsEncoder) { + if (mIsEncoder && mIsVideo) { mDecodingTimeList.push_back(lastBufferTimeUs); } @@ -3333,13 +3133,6 @@ bool OMXCodec::drainInputBuffer(BufferInfo *info) { info->mStatus = OWNED_BY_COMPONENT; - // This component does not ever signal the EOS flag on output buffers, - // Thanks for nothing. - if (mSignalledEOS && !strcmp(mComponentName, "OMX.TI.Video.encoder")) { - mNoMoreOutputData = true; - mBufferFilled.signal(); - } - return true; } @@ -3565,6 +3358,7 @@ status_t OMXCodec::setAACFormat(int32_t numChannels, int32_t sampleRate, int32_t //////////////// output port //////////////////// // format OMX_AUDIO_PARAM_PORTFORMATTYPE format; + InitOMXParams(&format); format.nPortIndex = kPortIndexOutput; format.nIndex = 0; status_t err = OMX_ErrorNone; diff --git a/media/libstagefright/OggExtractor.cpp b/media/libstagefright/OggExtractor.cpp index 73efc27..5e79e78 100644 --- a/media/libstagefright/OggExtractor.cpp +++ b/media/libstagefright/OggExtractor.cpp @@ -21,10 +21,10 @@ #include "include/OggExtractor.h" #include <cutils/properties.h> +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/DataSource.h> #include <media/stagefright/MediaBuffer.h> #include <media/stagefright/MediaBufferGroup.h> -#include <media/stagefright/MediaDebug.h> #include <media/stagefright/MediaDefs.h> #include <media/stagefright/MediaErrors.h> #include <media/stagefright/MediaSource.h> diff --git a/media/libstagefright/SampleIterator.cpp b/media/libstagefright/SampleIterator.cpp index 81ec5c1..eae721b 100644 --- a/media/libstagefright/SampleIterator.cpp +++ b/media/libstagefright/SampleIterator.cpp @@ -22,8 +22,8 @@ #include <arpa/inet.h> +#include 
<media/stagefright/foundation/ADebug.h> #include <media/stagefright/DataSource.h> -#include <media/stagefright/MediaDebug.h> #include <media/stagefright/Utils.h> #include "include/SampleTable.h" diff --git a/media/libstagefright/SampleTable.cpp b/media/libstagefright/SampleTable.cpp index 8d80d63..d9858d7 100644 --- a/media/libstagefright/SampleTable.cpp +++ b/media/libstagefright/SampleTable.cpp @@ -618,26 +618,31 @@ status_t SampleTable::findSyncSampleNear( } uint32_t left = 0; - while (left < mNumSyncSamples) { - uint32_t x = mSyncSamples[left]; + uint32_t right = mNumSyncSamples; + while (left < right) { + uint32_t center = left + (right - left) / 2; + uint32_t x = mSyncSamples[center]; - if (x >= start_sample_index) { + if (start_sample_index < x) { + right = center; + } else if (start_sample_index > x) { + left = center + 1; + } else { + left = center; break; } - - ++left; } - if (left == mNumSyncSamples) { if (flags == kFlagAfter) { ALOGE("tried to find a sync frame after the last one: %d", left); return ERROR_OUT_OF_RANGE; } + left = left - 1; } - if (left > 0) { - --left; - } + // Now ssi[left] is the sync sample index just before (or at) + // start_sample_index. + // Also start_sample_index < ssi[left + 1], if left + 1 < mNumSyncSamples. uint32_t x = mSyncSamples[left]; @@ -682,7 +687,11 @@ status_t SampleTable::findSyncSampleNear( x = mSyncSamples[left - 1]; - CHECK(x <= start_sample_index); + if (x > start_sample_index) { + // The table of sync sample indices was not sorted + // properly. + return ERROR_MALFORMED; + } } break; } @@ -696,7 +705,11 @@ status_t SampleTable::findSyncSampleNear( x = mSyncSamples[left + 1]; - CHECK(x >= start_sample_index); + if (x < start_sample_index) { + // The table of sync sample indices was not sorted + // properly. 
+ return ERROR_MALFORMED; + } } break; diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp index 43bfd9e..35f9c1f 100644 --- a/media/libstagefright/StagefrightMetadataRetriever.cpp +++ b/media/libstagefright/StagefrightMetadataRetriever.cpp @@ -20,10 +20,10 @@ #include "include/StagefrightMetadataRetriever.h" +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/ColorConverter.h> #include <media/stagefright/DataSource.h> #include <media/stagefright/FileSource.h> -#include <media/stagefright/MediaDebug.h> #include <media/stagefright/MediaExtractor.h> #include <media/stagefright/MetaData.h> #include <media/stagefright/OMXCodec.h> @@ -37,7 +37,7 @@ StagefrightMetadataRetriever::StagefrightMetadataRetriever() ALOGV("StagefrightMetadataRetriever()"); DataSource::RegisterDefaultSniffers(); - CHECK_EQ(mClient.connect(), OK); + CHECK_EQ(mClient.connect(), (status_t)OK); } StagefrightMetadataRetriever::~StagefrightMetadataRetriever() { @@ -169,7 +169,7 @@ static VideoFrame *extractVideoFrameWithCodecFlags( || (buffer != NULL && buffer->range_length() == 0)); if (err != OK) { - CHECK_EQ(buffer, NULL); + CHECK(buffer == NULL); ALOGV("decoding frame failed."); decoder->stop(); diff --git a/media/libstagefright/SurfaceMediaSource.cpp b/media/libstagefright/SurfaceMediaSource.cpp index 48df058..ab2cff0 100644 --- a/media/libstagefright/SurfaceMediaSource.cpp +++ b/media/libstagefright/SurfaceMediaSource.cpp @@ -16,22 +16,23 @@ //#define LOG_NDEBUG 0 #define LOG_TAG "SurfaceMediaSource" +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/SurfaceMediaSource.h> -#include <ui/GraphicBuffer.h> #include <media/stagefright/MetaData.h> #include <media/stagefright/MediaDefs.h> -#include <media/stagefright/MediaDebug.h> #include <media/stagefright/openmax/OMX_IVCommon.h> #include <media/stagefright/MetadataBufferType.h> -#include <surfaceflinger/ISurfaceComposer.h> 
-#include <surfaceflinger/SurfaceComposerClient.h> -#include <surfaceflinger/IGraphicBufferAlloc.h> +#include <ui/GraphicBuffer.h> +#include <gui/ISurfaceComposer.h> +#include <gui/IGraphicBufferAlloc.h> #include <OMX_Component.h> #include <utils/Log.h> #include <utils/String8.h> +#include <private/gui/ComposerService.h> + namespace android { SurfaceMediaSource::SurfaceMediaSource(uint32_t bufW, uint32_t bufH) : @@ -58,7 +59,7 @@ SurfaceMediaSource::SurfaceMediaSource(uint32_t bufW, uint32_t bufH) : SurfaceMediaSource::~SurfaceMediaSource() { ALOGV("SurfaceMediaSource::~SurfaceMediaSource"); if (!mStopped) { - stop(); + reset(); } } @@ -714,9 +715,9 @@ status_t SurfaceMediaSource::start(MetaData *params) } -status_t SurfaceMediaSource::stop() +status_t SurfaceMediaSource::reset() { - ALOGV("Stop"); + ALOGV("Reset"); Mutex::Autolock lock(mMutex); // TODO: Add waiting on mFrameCompletedCondition here? @@ -853,7 +854,7 @@ void SurfaceMediaSource::signalBufferReturned(MediaBuffer *buffer) { } if (!foundBuffer) { - CHECK_EQ(0, "signalBufferReturned: bogus buffer"); + CHECK(!"signalBufferReturned: bogus buffer"); } } diff --git a/media/libstagefright/ThrottledSource.cpp b/media/libstagefright/ThrottledSource.cpp index 88e07b0..b1fcafd 100644 --- a/media/libstagefright/ThrottledSource.cpp +++ b/media/libstagefright/ThrottledSource.cpp @@ -16,7 +16,7 @@ #include "include/ThrottledSource.h" -#include <media/stagefright/MediaDebug.h> +#include <media/stagefright/foundation/ADebug.h> namespace android { diff --git a/media/libstagefright/TimedEventQueue.cpp b/media/libstagefright/TimedEventQueue.cpp index 12c9c36..6d345bb 100644 --- a/media/libstagefright/TimedEventQueue.cpp +++ b/media/libstagefright/TimedEventQueue.cpp @@ -26,12 +26,10 @@ #include "include/TimedEventQueue.h" -#include <cutils/sched_policy.h> - #include <sys/prctl.h> #include <sys/time.h> -#include <media/stagefright/MediaDebug.h> +#include <media/stagefright/foundation/ADebug.h> #ifdef ANDROID_SIMULATOR 
#include <jni.h> diff --git a/media/libstagefright/VideoSourceDownSampler.cpp b/media/libstagefright/VideoSourceDownSampler.cpp index 1b66990..90a42c9 100644 --- a/media/libstagefright/VideoSourceDownSampler.cpp +++ b/media/libstagefright/VideoSourceDownSampler.cpp @@ -17,9 +17,9 @@ //#define LOG_NDEBUG 0 #define LOG_TAG "VideoSourceDownSampler" +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/VideoSourceDownSampler.h> #include <media/stagefright/MediaBuffer.h> -#include <media/stagefright/MediaDebug.h> #include <media/stagefright/MetaData.h> #include <media/stagefright/YUVImage.h> #include <media/stagefright/YUVCanvas.h> diff --git a/media/libstagefright/WAVExtractor.cpp b/media/libstagefright/WAVExtractor.cpp index 0bcaf08..501f480 100644 --- a/media/libstagefright/WAVExtractor.cpp +++ b/media/libstagefright/WAVExtractor.cpp @@ -20,9 +20,9 @@ #include "include/WAVExtractor.h" +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/DataSource.h> #include <media/stagefright/MediaBufferGroup.h> -#include <media/stagefright/MediaDebug.h> #include <media/stagefright/MediaDefs.h> #include <media/stagefright/MediaErrors.h> #include <media/stagefright/MediaSource.h> @@ -217,7 +217,7 @@ status_t WAVExtractor::init() { kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_G711_ALAW); break; default: - CHECK_EQ(mWaveFormat, WAVE_FORMAT_MULAW); + CHECK_EQ(mWaveFormat, (uint16_t)WAVE_FORMAT_MULAW); mTrackMeta->setCString( kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_G711_MLAW); break; @@ -362,7 +362,7 @@ status_t WAVSource::read( // Convert 8-bit unsigned samples to 16-bit signed. MediaBuffer *tmp; - CHECK_EQ(mGroup->acquire_buffer(&tmp), OK); + CHECK_EQ(mGroup->acquire_buffer(&tmp), (status_t)OK); // The new buffer holds the sample number of samples, but each // one is 2 bytes wide. 
diff --git a/media/libstagefright/WVMExtractor.cpp b/media/libstagefright/WVMExtractor.cpp index 2092cb6..dac8106 100644 --- a/media/libstagefright/WVMExtractor.cpp +++ b/media/libstagefright/WVMExtractor.cpp @@ -21,6 +21,7 @@ #include <arpa/inet.h> #include <utils/String8.h> +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/Utils.h> #include <media/stagefright/DataSource.h> #include <media/stagefright/MediaSource.h> @@ -28,7 +29,6 @@ #include <media/stagefright/MetaData.h> #include <media/stagefright/MediaErrors.h> #include <media/stagefright/MediaBuffer.h> -#include <media/stagefright/MediaDebug.h> #include <dlfcn.h> #include <utils/Errors.h> @@ -45,17 +45,12 @@ namespace android { static Mutex gWVMutex; WVMExtractor::WVMExtractor(const sp<DataSource> &source) - : mDataSource(source) { - { - Mutex::Autolock autoLock(gWVMutex); - if (gVendorLibHandle == NULL) { - gVendorLibHandle = dlopen("libwvm.so", RTLD_NOW); - } + : mDataSource(source) +{ + Mutex::Autolock autoLock(gWVMutex); - if (gVendorLibHandle == NULL) { - ALOGE("Failed to open libwvm.so"); - return; - } + if (!getVendorLibHandle()) { + return; } typedef WVMLoadableExtractor *(*GetInstanceFunc)(sp<DataSource>); @@ -64,13 +59,28 @@ WVMExtractor::WVMExtractor(const sp<DataSource> &source) "_ZN7android11GetInstanceENS_2spINS_10DataSourceEEE"); if (getInstanceFunc) { + CHECK(source->DrmInitialization(MEDIA_MIMETYPE_CONTAINER_WVM) != NULL); mImpl = (*getInstanceFunc)(source); CHECK(mImpl != NULL); + setDrmFlag(true); } else { ALOGE("Failed to locate GetInstance in libwvm.so"); } } +bool WVMExtractor::getVendorLibHandle() +{ + if (gVendorLibHandle == NULL) { + gVendorLibHandle = dlopen("libwvm.so", RTLD_NOW); + } + + if (gVendorLibHandle == NULL) { + ALOGE("Failed to open libwvm.so"); + } + + return gVendorLibHandle != NULL; +} + WVMExtractor::~WVMExtractor() { } @@ -113,5 +123,39 @@ void WVMExtractor::setAdaptiveStreamingMode(bool adaptive) { } } +void WVMExtractor::setUID(uid_t 
uid) { + if (mImpl != NULL) { + mImpl->setUID(uid); + } +} + +bool SniffWVM( + const sp<DataSource> &source, String8 *mimeType, float *confidence, + sp<AMessage> *) { + + Mutex::Autolock autoLock(gWVMutex); + + if (!WVMExtractor::getVendorLibHandle()) { + return false; + } + + typedef WVMLoadableExtractor *(*SnifferFunc)(const sp<DataSource>&); + SnifferFunc snifferFunc = + (SnifferFunc) dlsym(gVendorLibHandle, + "_ZN7android15IsWidevineMediaERKNS_2spINS_10DataSourceEEE"); + + if (snifferFunc) { + if ((*snifferFunc)(source)) { + *mimeType = MEDIA_MIMETYPE_CONTAINER_WVM; + *confidence = 10.0f; + return true; + } + } else { + ALOGE("IsWidevineMedia not found in libwvm.so"); + } + + return false; +} + } //namespace android diff --git a/media/libstagefright/chromium_http/Android.mk b/media/libstagefright/chromium_http/Android.mk index 6573e3c..63775f1 100644 --- a/media/libstagefright/chromium_http/Android.mk +++ b/media/libstagefright/chromium_http/Android.mk @@ -3,8 +3,9 @@ LOCAL_PATH:= $(call my-dir) include $(CLEAR_VARS) LOCAL_SRC_FILES:= \ - ChromiumHTTPDataSource.cpp \ - support.cpp \ + DataUriSource.cpp \ + ChromiumHTTPDataSource.cpp \ + support.cpp LOCAL_C_INCLUDES:= \ $(JNI_H_INCLUDE) \ diff --git a/media/libstagefright/chromium_http/ChromiumHTTPDataSource.cpp b/media/libstagefright/chromium_http/ChromiumHTTPDataSource.cpp index 180460b..76f7946 100644 --- a/media/libstagefright/chromium_http/ChromiumHTTPDataSource.cpp +++ b/media/libstagefright/chromium_http/ChromiumHTTPDataSource.cpp @@ -259,7 +259,7 @@ void ChromiumHTTPDataSource::onDisconnectComplete() { mCondition.broadcast(); } -sp<DecryptHandle> ChromiumHTTPDataSource::DrmInitialization() { +sp<DecryptHandle> ChromiumHTTPDataSource::DrmInitialization(const char* mime) { Mutex::Autolock autoLock(mLock); if (mDrmManagerClient == NULL) { @@ -275,7 +275,7 @@ sp<DecryptHandle> ChromiumHTTPDataSource::DrmInitialization() { * original one */ mDecryptHandle = mDrmManagerClient->openDecryptSession( - 
String8(mURI.c_str())); + String8(mURI.c_str()), mime); } if (mDecryptHandle == NULL) { diff --git a/media/libstagefright/chromium_http/DataUriSource.cpp b/media/libstagefright/chromium_http/DataUriSource.cpp new file mode 100644 index 0000000..ecf3fa1 --- /dev/null +++ b/media/libstagefright/chromium_http/DataUriSource.cpp @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <include/DataUriSource.h> + +#include <net/base/data_url.h> +#include <googleurl/src/gurl.h> + + +namespace android { + +DataUriSource::DataUriSource(const char *uri) : + mDataUri(uri), + mInited(NO_INIT) { + + // Copy1: const char *uri -> String8 mDataUri. + std::string mimeTypeStr, unusedCharsetStr, dataStr; + // Copy2: String8 mDataUri -> std::string + const bool ret = net::DataURL::Parse( + GURL(std::string(mDataUri.string())), + &mimeTypeStr, &unusedCharsetStr, &dataStr); + // Copy3: std::string dataStr -> AString mData + mData.setTo(dataStr.data(), dataStr.length()); + mInited = ret ? OK : UNKNOWN_ERROR; + + // The chromium data url implementation defaults to using "text/plain" + // if no mime type is specified. We prefer to leave this unspecified + // instead, since the mime type is sniffed in most cases. 
+ if (mimeTypeStr != "text/plain") { + mMimeType = mimeTypeStr.c_str(); + } +} + +ssize_t DataUriSource::readAt(off64_t offset, void *out, size_t size) { + if (mInited != OK) { + return mInited; + } + + const off64_t length = mData.size(); + if (offset >= length) { + return UNKNOWN_ERROR; + } + + const char *dataBuf = mData.c_str(); + const size_t bytesToCopy = + offset + size >= length ? (length - offset) : size; + + if (bytesToCopy > 0) { + memcpy(out, dataBuf + offset, bytesToCopy); + } + + return bytesToCopy; +} + +} // namespace android diff --git a/media/libstagefright/codecs/aacdec/SoftAAC.cpp b/media/libstagefright/codecs/aacdec/SoftAAC.cpp index da9d280..ea6c360 100644 --- a/media/libstagefright/codecs/aacdec/SoftAAC.cpp +++ b/media/libstagefright/codecs/aacdec/SoftAAC.cpp @@ -218,6 +218,18 @@ OMX_ERRORTYPE SoftAAC::internalSetParameter( return OMX_ErrorNone; } + case OMX_IndexParamAudioPcm: + { + const OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams = + (OMX_AUDIO_PARAM_PCMMODETYPE *)params; + + if (pcmParams->nPortIndex != 1) { + return OMX_ErrorUndefined; + } + + return OMX_ErrorNone; + } + default: return SimpleSoftOMXComponent::internalSetParameter(index, params); } diff --git a/media/libstagefright/codecs/aacenc/AACEncoder.cpp b/media/libstagefright/codecs/aacenc/AACEncoder.cpp index 2b8633d..8b5007e 100644 --- a/media/libstagefright/codecs/aacenc/AACEncoder.cpp +++ b/media/libstagefright/codecs/aacenc/AACEncoder.cpp @@ -22,8 +22,8 @@ #include "voAAC.h" #include "cmnMemory.h" +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/MediaBufferGroup.h> -#include <media/stagefright/MediaDebug.h> #include <media/stagefright/MediaDefs.h> #include <media/stagefright/MediaErrors.h> #include <media/stagefright/MetaData.h> @@ -114,8 +114,8 @@ status_t AACEncoder::setAudioSpecificConfigData() { ALOGV("setAudioSpecificConfigData: %d hz, %d bps, and %d channels", mSampleRate, mBitRate, mChannels); - int32_t index; - CHECK_EQ(OK, 
getSampleRateTableIndex(mSampleRate, index)); + int32_t index = 0; + CHECK_EQ((status_t)OK, getSampleRateTableIndex(mSampleRate, index)); if (mChannels > 2 || mChannels <= 0) { ALOGE("Unsupported number of channels(%d)", mChannels); return UNKNOWN_ERROR; @@ -142,7 +142,7 @@ status_t AACEncoder::start(MetaData *params) { mBufferGroup = new MediaBufferGroup; mBufferGroup->add_buffer(new MediaBuffer(2048)); - CHECK_EQ(OK, initCheck()); + CHECK_EQ((status_t)OK, initCheck()); mNumInputSamples = 0; mAnchorTimeUs = 0; @@ -183,7 +183,7 @@ status_t AACEncoder::stop() { mSource->stop(); if (mEncoderHandle) { - CHECK_EQ(VO_ERR_NONE, mApiHandle->Uninit(mEncoderHandle)); + CHECK_EQ((VO_U32)VO_ERR_NONE, mApiHandle->Uninit(mEncoderHandle)); mEncoderHandle = NULL; } delete mApiHandle; @@ -223,7 +223,7 @@ status_t AACEncoder::read( CHECK(options == NULL || !options->getSeekTo(&seekTimeUs, &mode)); MediaBuffer *buffer; - CHECK_EQ(mBufferGroup->acquire_buffer(&buffer), OK); + CHECK_EQ(mBufferGroup->acquire_buffer(&buffer), (status_t)OK); uint8_t *outPtr = (uint8_t *)buffer->data(); bool readFromSource = false; int64_t wallClockTimeUs = -1; @@ -255,7 +255,7 @@ status_t AACEncoder::read( } size_t align = mInputBuffer->range_length() % sizeof(int16_t); - CHECK_EQ(align, 0); + CHECK_EQ(align, (size_t)0); int64_t timeUs; if (mInputBuffer->meta_data()->findInt64(kKeyDriftTime, &timeUs)) { diff --git a/media/libstagefright/codecs/aacenc/Android.mk b/media/libstagefright/codecs/aacenc/Android.mk index c2579c7..509193c 100644 --- a/media/libstagefright/codecs/aacenc/Android.mk +++ b/media/libstagefright/codecs/aacenc/Android.mk @@ -85,3 +85,29 @@ LOCAL_C_INCLUDES += $(LOCAL_PATH)/src/asm/ARMV7 endif include $(BUILD_STATIC_LIBRARY) + +################################################################################ + +include $(CLEAR_VARS) + +LOCAL_SRC_FILES := \ + SoftAACEncoder.cpp + +LOCAL_C_INCLUDES := \ + frameworks/base/media/libstagefright/include \ + 
frameworks/base/include/media/stagefright/openmax \ + frameworks/base/media/libstagefright/codecs/common/include \ + +LOCAL_CFLAGS := -DOSCL_IMPORT_REF= + +LOCAL_STATIC_LIBRARIES := \ + libstagefright_aacenc + +LOCAL_SHARED_LIBRARIES := \ + libstagefright_omx libstagefright_foundation libutils \ + libstagefright_enc_common + +LOCAL_MODULE := libstagefright_soft_aacenc +LOCAL_MODULE_TAGS := optional + +include $(BUILD_SHARED_LIBRARY) diff --git a/media/libstagefright/codecs/aacenc/SoftAACEncoder.cpp b/media/libstagefright/codecs/aacenc/SoftAACEncoder.cpp new file mode 100644 index 0000000..c6724c2 --- /dev/null +++ b/media/libstagefright/codecs/aacenc/SoftAACEncoder.cpp @@ -0,0 +1,560 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "SoftAACEncoder" +#include <utils/Log.h> + +#include "SoftAACEncoder.h" + +#include "voAAC.h" +#include "cmnMemory.h" + +#include <media/stagefright/foundation/ADebug.h> +#include <media/stagefright/foundation/hexdump.h> + +namespace android { + +template<class T> +static void InitOMXParams(T *params) { + params->nSize = sizeof(T); + params->nVersion.s.nVersionMajor = 1; + params->nVersion.s.nVersionMinor = 0; + params->nVersion.s.nRevision = 0; + params->nVersion.s.nStep = 0; +} + +SoftAACEncoder::SoftAACEncoder( + const char *name, + const OMX_CALLBACKTYPE *callbacks, + OMX_PTR appData, + OMX_COMPONENTTYPE **component) + : SimpleSoftOMXComponent(name, callbacks, appData, component), + mEncoderHandle(NULL), + mApiHandle(NULL), + mMemOperator(NULL), + mNumChannels(1), + mSampleRate(44100), + mBitRate(0), + mSentCodecSpecificData(false), + mInputSize(0), + mInputFrame(NULL), + mInputTimeUs(-1ll), + mSawInputEOS(false), + mSignalledError(false) { + initPorts(); + CHECK_EQ(initEncoder(), (status_t)OK); + + setAudioParams(); +} + +SoftAACEncoder::~SoftAACEncoder() { + delete[] mInputFrame; + mInputFrame = NULL; + + if (mEncoderHandle) { + CHECK_EQ(VO_ERR_NONE, mApiHandle->Uninit(mEncoderHandle)); + mEncoderHandle = NULL; + } + + delete mApiHandle; + mApiHandle = NULL; + + delete mMemOperator; + mMemOperator = NULL; +} + +void SoftAACEncoder::initPorts() { + OMX_PARAM_PORTDEFINITIONTYPE def; + InitOMXParams(&def); + + def.nPortIndex = 0; + def.eDir = OMX_DirInput; + def.nBufferCountMin = kNumBuffers; + def.nBufferCountActual = def.nBufferCountMin; + def.nBufferSize = kNumSamplesPerFrame * sizeof(int16_t) * 2; + def.bEnabled = OMX_TRUE; + def.bPopulated = OMX_FALSE; + def.eDomain = OMX_PortDomainAudio; + def.bBuffersContiguous = OMX_FALSE; + def.nBufferAlignment = 1; + + def.format.audio.cMIMEType = const_cast<char *>("audio/raw"); + def.format.audio.pNativeRender = NULL; + def.format.audio.bFlagErrorConcealment = 
OMX_FALSE; + def.format.audio.eEncoding = OMX_AUDIO_CodingPCM; + + addPort(def); + + def.nPortIndex = 1; + def.eDir = OMX_DirOutput; + def.nBufferCountMin = kNumBuffers; + def.nBufferCountActual = def.nBufferCountMin; + def.nBufferSize = 8192; + def.bEnabled = OMX_TRUE; + def.bPopulated = OMX_FALSE; + def.eDomain = OMX_PortDomainAudio; + def.bBuffersContiguous = OMX_FALSE; + def.nBufferAlignment = 2; + + def.format.audio.cMIMEType = const_cast<char *>("audio/aac"); + def.format.audio.pNativeRender = NULL; + def.format.audio.bFlagErrorConcealment = OMX_FALSE; + def.format.audio.eEncoding = OMX_AUDIO_CodingAAC; + + addPort(def); +} + +status_t SoftAACEncoder::initEncoder() { + mApiHandle = new VO_AUDIO_CODECAPI; + + if (VO_ERR_NONE != voGetAACEncAPI(mApiHandle)) { + ALOGE("Failed to get api handle"); + return UNKNOWN_ERROR; + } + + mMemOperator = new VO_MEM_OPERATOR; + mMemOperator->Alloc = cmnMemAlloc; + mMemOperator->Copy = cmnMemCopy; + mMemOperator->Free = cmnMemFree; + mMemOperator->Set = cmnMemSet; + mMemOperator->Check = cmnMemCheck; + + VO_CODEC_INIT_USERDATA userData; + memset(&userData, 0, sizeof(userData)); + userData.memflag = VO_IMF_USERMEMOPERATOR; + userData.memData = (VO_PTR) mMemOperator; + if (VO_ERR_NONE != + mApiHandle->Init(&mEncoderHandle, VO_AUDIO_CodingAAC, &userData)) { + ALOGE("Failed to init AAC encoder"); + return UNKNOWN_ERROR; + } + + return OK; +} + +OMX_ERRORTYPE SoftAACEncoder::internalGetParameter( + OMX_INDEXTYPE index, OMX_PTR params) { + switch (index) { + case OMX_IndexParamAudioPortFormat: + { + OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams = + (OMX_AUDIO_PARAM_PORTFORMATTYPE *)params; + + if (formatParams->nPortIndex > 1) { + return OMX_ErrorUndefined; + } + + if (formatParams->nIndex > 0) { + return OMX_ErrorNoMore; + } + + formatParams->eEncoding = + (formatParams->nPortIndex == 0) + ? 
OMX_AUDIO_CodingPCM : OMX_AUDIO_CodingAAC; + + return OMX_ErrorNone; + } + + case OMX_IndexParamAudioAac: + { + OMX_AUDIO_PARAM_AACPROFILETYPE *aacParams = + (OMX_AUDIO_PARAM_AACPROFILETYPE *)params; + + if (aacParams->nPortIndex != 1) { + return OMX_ErrorUndefined; + } + + aacParams->nBitRate = mBitRate; + aacParams->nAudioBandWidth = 0; + aacParams->nAACtools = 0; + aacParams->nAACERtools = 0; + aacParams->eAACProfile = OMX_AUDIO_AACObjectMain; + aacParams->eAACStreamFormat = OMX_AUDIO_AACStreamFormatMP4FF; + aacParams->eChannelMode = OMX_AUDIO_ChannelModeStereo; + + aacParams->nChannels = mNumChannels; + aacParams->nSampleRate = mSampleRate; + aacParams->nFrameLength = 0; + + return OMX_ErrorNone; + } + + case OMX_IndexParamAudioPcm: + { + OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams = + (OMX_AUDIO_PARAM_PCMMODETYPE *)params; + + if (pcmParams->nPortIndex != 0) { + return OMX_ErrorUndefined; + } + + pcmParams->eNumData = OMX_NumericalDataSigned; + pcmParams->eEndian = OMX_EndianBig; + pcmParams->bInterleaved = OMX_TRUE; + pcmParams->nBitPerSample = 16; + pcmParams->ePCMMode = OMX_AUDIO_PCMModeLinear; + pcmParams->eChannelMapping[0] = OMX_AUDIO_ChannelLF; + pcmParams->eChannelMapping[1] = OMX_AUDIO_ChannelRF; + + pcmParams->nChannels = mNumChannels; + pcmParams->nSamplingRate = mSampleRate; + + return OMX_ErrorNone; + } + + default: + return SimpleSoftOMXComponent::internalGetParameter(index, params); + } +} + +OMX_ERRORTYPE SoftAACEncoder::internalSetParameter( + OMX_INDEXTYPE index, const OMX_PTR params) { + switch (index) { + case OMX_IndexParamStandardComponentRole: + { + const OMX_PARAM_COMPONENTROLETYPE *roleParams = + (const OMX_PARAM_COMPONENTROLETYPE *)params; + + if (strncmp((const char *)roleParams->cRole, + "audio_encoder.aac", + OMX_MAX_STRINGNAME_SIZE - 1)) { + return OMX_ErrorUndefined; + } + + return OMX_ErrorNone; + } + + case OMX_IndexParamAudioPortFormat: + { + const OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams = + (const 
OMX_AUDIO_PARAM_PORTFORMATTYPE *)params; + + if (formatParams->nPortIndex > 1) { + return OMX_ErrorUndefined; + } + + if (formatParams->nIndex > 0) { + return OMX_ErrorNoMore; + } + + if ((formatParams->nPortIndex == 0 + && formatParams->eEncoding != OMX_AUDIO_CodingPCM) + || (formatParams->nPortIndex == 1 + && formatParams->eEncoding != OMX_AUDIO_CodingAAC)) { + return OMX_ErrorUndefined; + } + + return OMX_ErrorNone; + } + + case OMX_IndexParamAudioAac: + { + OMX_AUDIO_PARAM_AACPROFILETYPE *aacParams = + (OMX_AUDIO_PARAM_AACPROFILETYPE *)params; + + if (aacParams->nPortIndex != 1) { + return OMX_ErrorUndefined; + } + + mBitRate = aacParams->nBitRate; + mNumChannels = aacParams->nChannels; + mSampleRate = aacParams->nSampleRate; + + if (setAudioParams() != OK) { + return OMX_ErrorUndefined; + } + + return OMX_ErrorNone; + } + + case OMX_IndexParamAudioPcm: + { + OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams = + (OMX_AUDIO_PARAM_PCMMODETYPE *)params; + + if (pcmParams->nPortIndex != 0) { + return OMX_ErrorUndefined; + } + + mNumChannels = pcmParams->nChannels; + mSampleRate = pcmParams->nSamplingRate; + + if (setAudioParams() != OK) { + return OMX_ErrorUndefined; + } + + return OMX_ErrorNone; + } + + + default: + return SimpleSoftOMXComponent::internalSetParameter(index, params); + } +} + +status_t SoftAACEncoder::setAudioParams() { + // We call this whenever sample rate, number of channels or bitrate change + // in reponse to setParameter calls. + + ALOGV("setAudioParams: %lu Hz, %lu channels, %lu bps", + mSampleRate, mNumChannels, mBitRate); + + status_t err = setAudioSpecificConfigData(); + + if (err != OK) { + return err; + } + + AACENC_PARAM params; + memset(¶ms, 0, sizeof(params)); + params.sampleRate = mSampleRate; + params.bitRate = mBitRate; + params.nChannels = mNumChannels; + params.adtsUsed = 0; // We add adts header in the file writer if needed. 
+ if (VO_ERR_NONE != mApiHandle->SetParam( + mEncoderHandle, VO_PID_AAC_ENCPARAM, ¶ms)) { + ALOGE("Failed to set AAC encoder parameters"); + return UNKNOWN_ERROR; + } + + return OK; +} + +static status_t getSampleRateTableIndex(int32_t sampleRate, int32_t &index) { + static const int32_t kSampleRateTable[] = { + 96000, 88200, 64000, 48000, 44100, 32000, + 24000, 22050, 16000, 12000, 11025, 8000 + }; + const int32_t tableSize = + sizeof(kSampleRateTable) / sizeof(kSampleRateTable[0]); + + for (int32_t i = 0; i < tableSize; ++i) { + if (sampleRate == kSampleRateTable[i]) { + index = i; + return OK; + } + } + + return UNKNOWN_ERROR; +} + +status_t SoftAACEncoder::setAudioSpecificConfigData() { + // The AAC encoder's audio specific config really only encodes + // number of channels and the sample rate (mapped to an index into + // a fixed sample rate table). + + int32_t index; + status_t err = getSampleRateTableIndex(mSampleRate, index); + if (err != OK) { + ALOGE("Unsupported sample rate (%lu Hz)", mSampleRate); + return err; + } + + if (mNumChannels > 2 || mNumChannels <= 0) { + ALOGE("Unsupported number of channels(%lu)", mNumChannels); + return UNKNOWN_ERROR; + } + + // OMX_AUDIO_AACObjectLC + mAudioSpecificConfigData[0] = ((0x02 << 3) | (index >> 1)); + mAudioSpecificConfigData[1] = ((index & 0x01) << 7) | (mNumChannels << 3); + + return OK; +} + +void SoftAACEncoder::onQueueFilled(OMX_U32 portIndex) { + if (mSignalledError) { + return; + } + + List<BufferInfo *> &inQueue = getPortQueue(0); + List<BufferInfo *> &outQueue = getPortQueue(1); + + if (!mSentCodecSpecificData) { + // The very first thing we want to output is the codec specific + // data. It does not require any input data but we will need an + // output buffer to store it in. 
+ + if (outQueue.empty()) { + return; + } + + BufferInfo *outInfo = *outQueue.begin(); + OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader; + outHeader->nFilledLen = sizeof(mAudioSpecificConfigData); + outHeader->nFlags = OMX_BUFFERFLAG_CODECCONFIG; + + uint8_t *out = outHeader->pBuffer + outHeader->nOffset; + memcpy(out, mAudioSpecificConfigData, sizeof(mAudioSpecificConfigData)); + +#if 0 + ALOGI("sending codec specific data."); + hexdump(out, sizeof(mAudioSpecificConfigData)); +#endif + + outQueue.erase(outQueue.begin()); + outInfo->mOwnedByUs = false; + notifyFillBufferDone(outHeader); + + mSentCodecSpecificData = true; + } + + size_t numBytesPerInputFrame = + mNumChannels * kNumSamplesPerFrame * sizeof(int16_t); + + for (;;) { + // We do the following until we run out of buffers. + + while (mInputSize < numBytesPerInputFrame) { + // As long as there's still input data to be read we + // will drain "kNumSamplesPerFrame * mNumChannels" samples + // into the "mInputFrame" buffer and then encode those + // as a unit into an output buffer. + + if (mSawInputEOS || inQueue.empty()) { + return; + } + + BufferInfo *inInfo = *inQueue.begin(); + OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader; + + const void *inData = inHeader->pBuffer + inHeader->nOffset; + + size_t copy = numBytesPerInputFrame - mInputSize; + if (copy > inHeader->nFilledLen) { + copy = inHeader->nFilledLen; + } + + if (mInputFrame == NULL) { + mInputFrame = new int16_t[kNumSamplesPerFrame * mNumChannels]; + } + + if (mInputSize == 0) { + mInputTimeUs = inHeader->nTimeStamp; + } + + memcpy((uint8_t *)mInputFrame + mInputSize, inData, copy); + mInputSize += copy; + + inHeader->nOffset += copy; + inHeader->nFilledLen -= copy; + + // "Time" on the input buffer has in effect advanced by the + // number of audio frames we just advanced nOffset by. 
+ inHeader->nTimeStamp += + (copy * 1000000ll / mSampleRate) + / (mNumChannels * sizeof(int16_t)); + + if (inHeader->nFilledLen == 0) { + if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) { + ALOGV("saw input EOS"); + mSawInputEOS = true; + + // Pad any remaining data with zeroes. + memset((uint8_t *)mInputFrame + mInputSize, + 0, + numBytesPerInputFrame - mInputSize); + + mInputSize = numBytesPerInputFrame; + } + + inQueue.erase(inQueue.begin()); + inInfo->mOwnedByUs = false; + notifyEmptyBufferDone(inHeader); + + inData = NULL; + inHeader = NULL; + inInfo = NULL; + } + } + + // At this point we have all the input data necessary to encode + // a single frame, all we need is an output buffer to store the result + // in. + + if (outQueue.empty()) { + return; + } + + BufferInfo *outInfo = *outQueue.begin(); + OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader; + + VO_CODECBUFFER inputData; + memset(&inputData, 0, sizeof(inputData)); + inputData.Buffer = (unsigned char *)mInputFrame; + inputData.Length = numBytesPerInputFrame; + CHECK(VO_ERR_NONE == + mApiHandle->SetInputData(mEncoderHandle, &inputData)); + + VO_CODECBUFFER outputData; + memset(&outputData, 0, sizeof(outputData)); + VO_AUDIO_OUTPUTINFO outputInfo; + memset(&outputInfo, 0, sizeof(outputInfo)); + + uint8_t *outPtr = (uint8_t *)outHeader->pBuffer + outHeader->nOffset; + size_t outAvailable = outHeader->nAllocLen - outHeader->nOffset; + + VO_U32 ret = VO_ERR_NONE; + size_t nOutputBytes = 0; + do { + outputData.Buffer = outPtr; + outputData.Length = outAvailable - nOutputBytes; + ret = mApiHandle->GetOutputData( + mEncoderHandle, &outputData, &outputInfo); + if (ret == VO_ERR_NONE) { + outPtr += outputData.Length; + nOutputBytes += outputData.Length; + } + } while (ret != VO_ERR_INPUT_BUFFER_SMALL); + + outHeader->nFilledLen = nOutputBytes; + + outHeader->nFlags = OMX_BUFFERFLAG_ENDOFFRAME; + + if (mSawInputEOS) { + // We also tag this output buffer with EOS if it corresponds + // to the final input buffer. 
+ outHeader->nFlags = OMX_BUFFERFLAG_EOS; + } + + outHeader->nTimeStamp = mInputTimeUs; + +#if 0 + ALOGI("sending %d bytes of data (time = %lld us, flags = 0x%08lx)", + nOutputBytes, mInputTimeUs, outHeader->nFlags); + + hexdump(outHeader->pBuffer + outHeader->nOffset, outHeader->nFilledLen); +#endif + + outQueue.erase(outQueue.begin()); + outInfo->mOwnedByUs = false; + notifyFillBufferDone(outHeader); + + outHeader = NULL; + outInfo = NULL; + + mInputSize = 0; + } +} + +} // namespace android + +android::SoftOMXComponent *createSoftOMXComponent( + const char *name, const OMX_CALLBACKTYPE *callbacks, + OMX_PTR appData, OMX_COMPONENTTYPE **component) { + return new android::SoftAACEncoder(name, callbacks, appData, component); +} diff --git a/media/libstagefright/codecs/aacenc/SoftAACEncoder.h b/media/libstagefright/codecs/aacenc/SoftAACEncoder.h new file mode 100644 index 0000000..d148eb7 --- /dev/null +++ b/media/libstagefright/codecs/aacenc/SoftAACEncoder.h @@ -0,0 +1,82 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef SOFT_AAC_ENCODER_H_ + +#define SOFT_AAC_ENCODER_H_ + +#include "SimpleSoftOMXComponent.h" + +struct VO_AUDIO_CODECAPI; +struct VO_MEM_OPERATOR; + +namespace android { + +struct SoftAACEncoder : public SimpleSoftOMXComponent { + SoftAACEncoder( + const char *name, + const OMX_CALLBACKTYPE *callbacks, + OMX_PTR appData, + OMX_COMPONENTTYPE **component); + +protected: + virtual ~SoftAACEncoder(); + + virtual OMX_ERRORTYPE internalGetParameter( + OMX_INDEXTYPE index, OMX_PTR params); + + virtual OMX_ERRORTYPE internalSetParameter( + OMX_INDEXTYPE index, const OMX_PTR params); + + virtual void onQueueFilled(OMX_U32 portIndex); + +private: + enum { + kNumBuffers = 4, + kNumSamplesPerFrame = 1024, + }; + + void *mEncoderHandle; + VO_AUDIO_CODECAPI *mApiHandle; + VO_MEM_OPERATOR *mMemOperator; + + OMX_U32 mNumChannels; + OMX_U32 mSampleRate; + OMX_U32 mBitRate; + + bool mSentCodecSpecificData; + size_t mInputSize; + int16_t *mInputFrame; + int64_t mInputTimeUs; + + bool mSawInputEOS; + + uint8_t mAudioSpecificConfigData[2]; + + bool mSignalledError; + + void initPorts(); + status_t initEncoder(); + + status_t setAudioSpecificConfigData(); + status_t setAudioParams(); + + DISALLOW_EVIL_CONSTRUCTORS(SoftAACEncoder); +}; + +} // namespace android + +#endif // SOFT_AAC_ENCODER_H_ diff --git a/media/libstagefright/codecs/aacenc/src/asm/ARMV7/PrePostMDCT_v7.s b/media/libstagefright/codecs/aacenc/src/asm/ARMV7/PrePostMDCT_v7.s index b2bc9d9..7f6b881 100644 --- a/media/libstagefright/codecs/aacenc/src/asm/ARMV7/PrePostMDCT_v7.s +++ b/media/libstagefright/codecs/aacenc/src/asm/ARMV7/PrePostMDCT_v7.s @@ -23,9 +23,13 @@ .section .text .global PreMDCT + .fnstart PreMDCT: stmdb sp!, {r4 - r11, lr} + .save {r4 - r11, lr} + fstmfdd sp!, {d8 - d15} + .vsave {d8 - d15} add r9, r0, r1, lsl #2 sub r3, r9, #32 @@ -74,14 +78,20 @@ PreMDCT_LOOP: bne PreMDCT_LOOP PreMDCT_END: + fldmfdd sp!, {d8 - d15} ldmia sp!, {r4 - r11, pc} @ENDP @ |PreMDCT| + .fnend .section .text .global 
PostMDCT + .fnstart PostMDCT: stmdb sp!, {r4 - r11, lr} + .save {r4 - r11, lr} + fstmfdd sp!, {d8 - d15} + .vsave {d8 - d15} add r9, r0, r1, lsl #2 sub r3, r9, #32 @@ -129,7 +139,8 @@ PostMDCT_LOOP: bne PostMDCT_LOOP PostMDCT_END: + fldmfdd sp!, {d8 - d15} ldmia sp!, {r4 - r11, pc} @ENDP @ |PostMDCT| - .end + .fnend diff --git a/media/libstagefright/codecs/aacenc/src/asm/ARMV7/R4R8First_v7.s b/media/libstagefright/codecs/aacenc/src/asm/ARMV7/R4R8First_v7.s index 3033156..03fa6a9 100644 --- a/media/libstagefright/codecs/aacenc/src/asm/ARMV7/R4R8First_v7.s +++ b/media/libstagefright/codecs/aacenc/src/asm/ARMV7/R4R8First_v7.s @@ -23,9 +23,13 @@ .section .text .global Radix8First + .fnstart Radix8First: stmdb sp!, {r4 - r11, lr} + .save {r4 - r11, lr} + fstmfdd sp!, {d8 - d15} + .vsave {d8 - d15} ldr r3, SQRT1_2 cmp r1, #0 @@ -103,17 +107,23 @@ Radix8First_LOOP: bne Radix8First_LOOP Radix8First_END: + fldmfdd sp!, {d8 - d15} ldmia sp!, {r4 - r11, pc} SQRT1_2: .word 0x2d413ccd @ENDP @ |Radix8First| + .fnend .section .text .global Radix4First + .fnstart Radix4First: stmdb sp!, {r4 - r11, lr} + .save {r4 - r11, lr} + fstmfdd sp!, {d8 - d15} + .vsave {d8 - d15} cmp r1, #0 beq Radix4First_END @@ -140,7 +150,8 @@ Radix4First_LOOP: bne Radix4First_LOOP Radix4First_END: + fldmfdd sp!, {d8 - d15} ldmia sp!, {r4 - r11, pc} @ENDP @ |Radix4First| - .end + .fnend diff --git a/media/libstagefright/codecs/aacenc/src/asm/ARMV7/Radix4FFT_v7.s b/media/libstagefright/codecs/aacenc/src/asm/ARMV7/Radix4FFT_v7.s index f874825..431bc30 100644 --- a/media/libstagefright/codecs/aacenc/src/asm/ARMV7/Radix4FFT_v7.s +++ b/media/libstagefright/codecs/aacenc/src/asm/ARMV7/Radix4FFT_v7.s @@ -23,9 +23,13 @@ .section .text .global Radix4FFT + .fnstart Radix4FFT: stmdb sp!, {r4 - r11, lr} + .save {r4 - r11, lr} + fstmfdd sp!, {d8 - d15} + .vsave {d8 - d15} mov r1, r1, asr #2 cmp r1, #0 @@ -137,7 +141,8 @@ Radix4FFT_LOOP1_END: bne Radix4FFT_LOOP1 Radix4FFT_END: + fldmfdd sp!, {d8 - d15} ldmia sp!, {r4 - 
r11, pc} @ENDP @ |Radix4FFT| - .end + .fnend diff --git a/media/libstagefright/codecs/amrnb/enc/AMRNBEncoder.cpp b/media/libstagefright/codecs/amrnb/enc/AMRNBEncoder.cpp index 3afbc4f..27d7e4d 100644 --- a/media/libstagefright/codecs/amrnb/enc/AMRNBEncoder.cpp +++ b/media/libstagefright/codecs/amrnb/enc/AMRNBEncoder.cpp @@ -18,8 +18,8 @@ #include "gsmamr_enc.h" +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/MediaBufferGroup.h> -#include <media/stagefright/MediaDebug.h> #include <media/stagefright/MediaDefs.h> #include <media/stagefright/MediaErrors.h> #include <media/stagefright/MetaData.h> @@ -210,7 +210,7 @@ status_t AMRNBEncoder::read( } MediaBuffer *buffer; - CHECK_EQ(mBufferGroup->acquire_buffer(&buffer), OK); + CHECK_EQ(mBufferGroup->acquire_buffer(&buffer), (status_t)OK); uint8_t *outPtr = (uint8_t *)buffer->data(); diff --git a/media/libstagefright/codecs/amrnb/enc/Android.mk b/media/libstagefright/codecs/amrnb/enc/Android.mk index b6aed81..94e8726 100644 --- a/media/libstagefright/codecs/amrnb/enc/Android.mk +++ b/media/libstagefright/codecs/amrnb/enc/Android.mk @@ -74,3 +74,30 @@ LOCAL_CFLAGS := \ LOCAL_MODULE := libstagefright_amrnbenc include $(BUILD_STATIC_LIBRARY) + +################################################################################ + +include $(CLEAR_VARS) + +LOCAL_SRC_FILES := \ + SoftAMRNBEncoder.cpp + +LOCAL_C_INCLUDES := \ + frameworks/base/media/libstagefright/include \ + frameworks/base/include/media/stagefright/openmax \ + $(LOCAL_PATH)/src \ + $(LOCAL_PATH)/include \ + $(LOCAL_PATH)/../common/include \ + $(LOCAL_PATH)/../common + +LOCAL_STATIC_LIBRARIES := \ + libstagefright_amrnbenc + +LOCAL_SHARED_LIBRARIES := \ + libstagefright_omx libstagefright_foundation libutils \ + libstagefright_amrnb_common + +LOCAL_MODULE := libstagefright_soft_amrnbenc +LOCAL_MODULE_TAGS := optional + +include $(BUILD_SHARED_LIBRARY) diff --git a/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.cpp 
b/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.cpp new file mode 100644 index 0000000..07f8b4f --- /dev/null +++ b/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.cpp @@ -0,0 +1,404 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "SoftAMRNBEncoder" +#include <utils/Log.h> + +#include "SoftAMRNBEncoder.h" + +#include "gsmamr_enc.h" + +#include <media/stagefright/foundation/ADebug.h> +#include <media/stagefright/foundation/hexdump.h> + +namespace android { + +static const int32_t kSampleRate = 8000; + +template<class T> +static void InitOMXParams(T *params) { + params->nSize = sizeof(T); + params->nVersion.s.nVersionMajor = 1; + params->nVersion.s.nVersionMinor = 0; + params->nVersion.s.nRevision = 0; + params->nVersion.s.nStep = 0; +} + +SoftAMRNBEncoder::SoftAMRNBEncoder( + const char *name, + const OMX_CALLBACKTYPE *callbacks, + OMX_PTR appData, + OMX_COMPONENTTYPE **component) + : SimpleSoftOMXComponent(name, callbacks, appData, component), + mEncState(NULL), + mSidState(NULL), + mBitRate(0), + mMode(MR475), + mInputSize(0), + mInputTimeUs(-1ll), + mSawInputEOS(false), + mSignalledError(false) { + initPorts(); + CHECK_EQ(initEncoder(), (status_t)OK); +} + +SoftAMRNBEncoder::~SoftAMRNBEncoder() { + if (mEncState != NULL) { + AMREncodeExit(&mEncState, &mSidState); + mEncState = mSidState = NULL; + } +} + +void 
SoftAMRNBEncoder::initPorts() { + OMX_PARAM_PORTDEFINITIONTYPE def; + InitOMXParams(&def); + + def.nPortIndex = 0; + def.eDir = OMX_DirInput; + def.nBufferCountMin = kNumBuffers; + def.nBufferCountActual = def.nBufferCountMin; + def.nBufferSize = kNumSamplesPerFrame * sizeof(int16_t); + def.bEnabled = OMX_TRUE; + def.bPopulated = OMX_FALSE; + def.eDomain = OMX_PortDomainAudio; + def.bBuffersContiguous = OMX_FALSE; + def.nBufferAlignment = 1; + + def.format.audio.cMIMEType = const_cast<char *>("audio/raw"); + def.format.audio.pNativeRender = NULL; + def.format.audio.bFlagErrorConcealment = OMX_FALSE; + def.format.audio.eEncoding = OMX_AUDIO_CodingPCM; + + addPort(def); + + def.nPortIndex = 1; + def.eDir = OMX_DirOutput; + def.nBufferCountMin = kNumBuffers; + def.nBufferCountActual = def.nBufferCountMin; + def.nBufferSize = 8192; + def.bEnabled = OMX_TRUE; + def.bPopulated = OMX_FALSE; + def.eDomain = OMX_PortDomainAudio; + def.bBuffersContiguous = OMX_FALSE; + def.nBufferAlignment = 2; + + def.format.audio.cMIMEType = const_cast<char *>("audio/3gpp"); + def.format.audio.pNativeRender = NULL; + def.format.audio.bFlagErrorConcealment = OMX_FALSE; + def.format.audio.eEncoding = OMX_AUDIO_CodingAMR; + + addPort(def); +} + +status_t SoftAMRNBEncoder::initEncoder() { + if (AMREncodeInit(&mEncState, &mSidState, false /* dtx_enable */) != 0) { + return UNKNOWN_ERROR; + } + + return OK; +} + +OMX_ERRORTYPE SoftAMRNBEncoder::internalGetParameter( + OMX_INDEXTYPE index, OMX_PTR params) { + switch (index) { + case OMX_IndexParamAudioPortFormat: + { + OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams = + (OMX_AUDIO_PARAM_PORTFORMATTYPE *)params; + + if (formatParams->nPortIndex > 1) { + return OMX_ErrorUndefined; + } + + if (formatParams->nIndex > 0) { + return OMX_ErrorNoMore; + } + + formatParams->eEncoding = + (formatParams->nPortIndex == 0) + ? 
OMX_AUDIO_CodingPCM : OMX_AUDIO_CodingAMR; + + return OMX_ErrorNone; + } + + case OMX_IndexParamAudioAmr: + { + OMX_AUDIO_PARAM_AMRTYPE *amrParams = + (OMX_AUDIO_PARAM_AMRTYPE *)params; + + if (amrParams->nPortIndex != 1) { + return OMX_ErrorUndefined; + } + + amrParams->nChannels = 1; + amrParams->nBitRate = mBitRate; + amrParams->eAMRBandMode = (OMX_AUDIO_AMRBANDMODETYPE)(mMode + 1); + amrParams->eAMRDTXMode = OMX_AUDIO_AMRDTXModeOff; + amrParams->eAMRFrameFormat = OMX_AUDIO_AMRFrameFormatFSF; + + return OMX_ErrorNone; + } + + case OMX_IndexParamAudioPcm: + { + OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams = + (OMX_AUDIO_PARAM_PCMMODETYPE *)params; + + if (pcmParams->nPortIndex != 0) { + return OMX_ErrorUndefined; + } + + pcmParams->eNumData = OMX_NumericalDataSigned; + pcmParams->eEndian = OMX_EndianBig; + pcmParams->bInterleaved = OMX_TRUE; + pcmParams->nBitPerSample = 16; + pcmParams->ePCMMode = OMX_AUDIO_PCMModeLinear; + pcmParams->eChannelMapping[0] = OMX_AUDIO_ChannelCF; + + pcmParams->nChannels = 1; + pcmParams->nSamplingRate = kSampleRate; + + return OMX_ErrorNone; + } + + default: + return SimpleSoftOMXComponent::internalGetParameter(index, params); + } +} + +OMX_ERRORTYPE SoftAMRNBEncoder::internalSetParameter( + OMX_INDEXTYPE index, const OMX_PTR params) { + switch (index) { + case OMX_IndexParamStandardComponentRole: + { + const OMX_PARAM_COMPONENTROLETYPE *roleParams = + (const OMX_PARAM_COMPONENTROLETYPE *)params; + + if (strncmp((const char *)roleParams->cRole, + "audio_encoder.amrnb", + OMX_MAX_STRINGNAME_SIZE - 1)) { + return OMX_ErrorUndefined; + } + + return OMX_ErrorNone; + } + + case OMX_IndexParamAudioPortFormat: + { + const OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams = + (const OMX_AUDIO_PARAM_PORTFORMATTYPE *)params; + + if (formatParams->nPortIndex > 1) { + return OMX_ErrorUndefined; + } + + if (formatParams->nIndex > 0) { + return OMX_ErrorNoMore; + } + + if ((formatParams->nPortIndex == 0 + && formatParams->eEncoding != OMX_AUDIO_CodingPCM) + 
|| (formatParams->nPortIndex == 1 + && formatParams->eEncoding != OMX_AUDIO_CodingAMR)) { + return OMX_ErrorUndefined; + } + + return OMX_ErrorNone; + } + + case OMX_IndexParamAudioAmr: + { + OMX_AUDIO_PARAM_AMRTYPE *amrParams = + (OMX_AUDIO_PARAM_AMRTYPE *)params; + + if (amrParams->nPortIndex != 1) { + return OMX_ErrorUndefined; + } + + if (amrParams->nChannels != 1 + || amrParams->eAMRDTXMode != OMX_AUDIO_AMRDTXModeOff + || amrParams->eAMRFrameFormat + != OMX_AUDIO_AMRFrameFormatFSF + || amrParams->eAMRBandMode < OMX_AUDIO_AMRBandModeNB0 + || amrParams->eAMRBandMode > OMX_AUDIO_AMRBandModeNB7) { + return OMX_ErrorUndefined; + } + + mBitRate = amrParams->nBitRate; + mMode = amrParams->eAMRBandMode - 1; + + amrParams->eAMRDTXMode = OMX_AUDIO_AMRDTXModeOff; + amrParams->eAMRFrameFormat = OMX_AUDIO_AMRFrameFormatFSF; + + return OMX_ErrorNone; + } + + case OMX_IndexParamAudioPcm: + { + OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams = + (OMX_AUDIO_PARAM_PCMMODETYPE *)params; + + if (pcmParams->nPortIndex != 0) { + return OMX_ErrorUndefined; + } + + if (pcmParams->nChannels != 1 + || pcmParams->nSamplingRate != kSampleRate) { + return OMX_ErrorUndefined; + } + + return OMX_ErrorNone; + } + + + default: + return SimpleSoftOMXComponent::internalSetParameter(index, params); + } +} + +void SoftAMRNBEncoder::onQueueFilled(OMX_U32 portIndex) { + if (mSignalledError) { + return; + } + + List<BufferInfo *> &inQueue = getPortQueue(0); + List<BufferInfo *> &outQueue = getPortQueue(1); + + size_t numBytesPerInputFrame = kNumSamplesPerFrame * sizeof(int16_t); + + for (;;) { + // We do the following until we run out of buffers. + + while (mInputSize < numBytesPerInputFrame) { + // As long as there's still input data to be read we + // will drain "kNumSamplesPerFrame" samples + // into the "mInputFrame" buffer and then encode those + // as a unit into an output buffer. 
+ + if (mSawInputEOS || inQueue.empty()) { + return; + } + + BufferInfo *inInfo = *inQueue.begin(); + OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader; + + const void *inData = inHeader->pBuffer + inHeader->nOffset; + + size_t copy = numBytesPerInputFrame - mInputSize; + if (copy > inHeader->nFilledLen) { + copy = inHeader->nFilledLen; + } + + if (mInputSize == 0) { + mInputTimeUs = inHeader->nTimeStamp; + } + + memcpy((uint8_t *)mInputFrame + mInputSize, inData, copy); + mInputSize += copy; + + inHeader->nOffset += copy; + inHeader->nFilledLen -= copy; + + // "Time" on the input buffer has in effect advanced by the + // number of audio frames we just advanced nOffset by. + inHeader->nTimeStamp += + (copy * 1000000ll / kSampleRate) / sizeof(int16_t); + + if (inHeader->nFilledLen == 0) { + if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) { + ALOGV("saw input EOS"); + mSawInputEOS = true; + + // Pad any remaining data with zeroes. + memset((uint8_t *)mInputFrame + mInputSize, + 0, + numBytesPerInputFrame - mInputSize); + + mInputSize = numBytesPerInputFrame; + } + + inQueue.erase(inQueue.begin()); + inInfo->mOwnedByUs = false; + notifyEmptyBufferDone(inHeader); + + inData = NULL; + inHeader = NULL; + inInfo = NULL; + } + } + + // At this point we have all the input data necessary to encode + // a single frame, all we need is an output buffer to store the result + // in. + + if (outQueue.empty()) { + return; + } + + BufferInfo *outInfo = *outQueue.begin(); + OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader; + + uint8_t *outPtr = outHeader->pBuffer + outHeader->nOffset; + size_t outAvailable = outHeader->nAllocLen - outHeader->nOffset; + + Frame_Type_3GPP frameType; + int res = AMREncode( + mEncState, mSidState, (Mode)mMode, + mInputFrame, outPtr, &frameType, AMR_TX_WMF); + + CHECK_GE(res, 0); + CHECK_LE((size_t)res, outAvailable); + + // Convert header byte from WMF to IETF format. 
+ outPtr[0] = ((outPtr[0] << 3) | 4) & 0x7c; + + outHeader->nFilledLen = res; + outHeader->nFlags = OMX_BUFFERFLAG_ENDOFFRAME; + + if (mSawInputEOS) { + // We also tag this output buffer with EOS if it corresponds + // to the final input buffer. + outHeader->nFlags = OMX_BUFFERFLAG_EOS; + } + + outHeader->nTimeStamp = mInputTimeUs; + +#if 0 + ALOGI("sending %d bytes of data (time = %lld us, flags = 0x%08lx)", + nOutputBytes, mInputTimeUs, outHeader->nFlags); + + hexdump(outHeader->pBuffer + outHeader->nOffset, outHeader->nFilledLen); +#endif + + outQueue.erase(outQueue.begin()); + outInfo->mOwnedByUs = false; + notifyFillBufferDone(outHeader); + + outHeader = NULL; + outInfo = NULL; + + mInputSize = 0; + } +} + +} // namespace android + +android::SoftOMXComponent *createSoftOMXComponent( + const char *name, const OMX_CALLBACKTYPE *callbacks, + OMX_PTR appData, OMX_COMPONENTTYPE **component) { + return new android::SoftAMRNBEncoder(name, callbacks, appData, component); +} diff --git a/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.h b/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.h new file mode 100644 index 0000000..50178c4 --- /dev/null +++ b/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.h @@ -0,0 +1,72 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef SOFT_AMRNB_ENCODER_H_ + +#define SOFT_AMRNB_ENCODER_H_ + +#include "SimpleSoftOMXComponent.h" + +namespace android { + +struct SoftAMRNBEncoder : public SimpleSoftOMXComponent { + SoftAMRNBEncoder( + const char *name, + const OMX_CALLBACKTYPE *callbacks, + OMX_PTR appData, + OMX_COMPONENTTYPE **component); + +protected: + virtual ~SoftAMRNBEncoder(); + + virtual OMX_ERRORTYPE internalGetParameter( + OMX_INDEXTYPE index, OMX_PTR params); + + virtual OMX_ERRORTYPE internalSetParameter( + OMX_INDEXTYPE index, const OMX_PTR params); + + virtual void onQueueFilled(OMX_U32 portIndex); + +private: + enum { + kNumBuffers = 4, + kNumSamplesPerFrame = 160, + }; + + void *mEncState; + void *mSidState; + + OMX_U32 mBitRate; + int mMode; + + size_t mInputSize; + int16_t mInputFrame[kNumSamplesPerFrame]; + int64_t mInputTimeUs; + + bool mSawInputEOS; + bool mSignalledError; + + void initPorts(); + status_t initEncoder(); + + status_t setAudioParams(); + + DISALLOW_EVIL_CONSTRUCTORS(SoftAMRNBEncoder); +}; + +} // namespace android + +#endif // SOFT_AMRNB_ENCODER_H_ diff --git a/media/libstagefright/codecs/amrwbenc/AMRWBEncoder.cpp b/media/libstagefright/codecs/amrwbenc/AMRWBEncoder.cpp index 60b1163..7fd3a95 100644 --- a/media/libstagefright/codecs/amrwbenc/AMRWBEncoder.cpp +++ b/media/libstagefright/codecs/amrwbenc/AMRWBEncoder.cpp @@ -22,8 +22,8 @@ #include "voAMRWB.h" #include "cmnMemory.h" +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/MediaBufferGroup.h> -#include <media/stagefright/MediaDebug.h> #include <media/stagefright/MediaDefs.h> #include <media/stagefright/MediaErrors.h> #include <media/stagefright/MetaData.h> @@ -134,7 +134,7 @@ status_t AMRWBEncoder::start(MetaData *params) { // The largest buffer size is header + 477 bits mBufferGroup->add_buffer(new MediaBuffer(1024)); - CHECK_EQ(OK, initCheck()); + CHECK_EQ((status_t)OK, initCheck()); mNumFramesOutput = 0; @@ -163,7 +163,7 @@ status_t AMRWBEncoder::stop() { 
mBufferGroup = NULL; - CHECK_EQ(VO_ERR_NONE, mApiHandle->Uninit(mEncoderHandle)); + CHECK_EQ((VO_U32)VO_ERR_NONE, mApiHandle->Uninit(mEncoderHandle)); mEncoderHandle = NULL; delete mApiHandle; @@ -222,7 +222,7 @@ status_t AMRWBEncoder::read( } size_t align = mInputBuffer->range_length() % sizeof(int16_t); - CHECK_EQ(align, 0); + CHECK_EQ(align, (size_t)0); int64_t timeUs; if (mInputBuffer->meta_data()->findInt64(kKeyDriftTime, &timeUs)) { @@ -271,7 +271,7 @@ status_t AMRWBEncoder::read( CHECK(VO_ERR_NONE == mApiHandle->SetInputData(mEncoderHandle,&inputData)); MediaBuffer *buffer; - CHECK_EQ(mBufferGroup->acquire_buffer(&buffer), OK); + CHECK_EQ(mBufferGroup->acquire_buffer(&buffer), (status_t)OK); uint8_t *outPtr = (uint8_t *)buffer->data(); VO_CODECBUFFER outputData; diff --git a/media/libstagefright/codecs/amrwbenc/Android.mk b/media/libstagefright/codecs/amrwbenc/Android.mk index ae43870..6ce6171 100644 --- a/media/libstagefright/codecs/amrwbenc/Android.mk +++ b/media/libstagefright/codecs/amrwbenc/Android.mk @@ -117,4 +117,26 @@ endif include $(BUILD_STATIC_LIBRARY) +################################################################################ +include $(CLEAR_VARS) + +LOCAL_SRC_FILES := \ + SoftAMRWBEncoder.cpp + +LOCAL_C_INCLUDES := \ + frameworks/base/media/libstagefright/include \ + frameworks/base/include/media/stagefright/openmax \ + frameworks/base/media/libstagefright/codecs/common/include \ + +LOCAL_STATIC_LIBRARIES := \ + libstagefright_amrwbenc + +LOCAL_SHARED_LIBRARIES := \ + libstagefright_omx libstagefright_foundation libutils \ + libstagefright_enc_common + +LOCAL_MODULE := libstagefright_soft_amrwbenc +LOCAL_MODULE_TAGS := optional + +include $(BUILD_SHARED_LIBRARY) diff --git a/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.cpp b/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.cpp new file mode 100644 index 0000000..9ccb49c --- /dev/null +++ b/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.cpp @@ -0,0 +1,459 @@ +/* + * 
Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "SoftAMRWBEncoder" +#include <utils/Log.h> + +#include "SoftAMRWBEncoder.h" + +#include "cmnMemory.h" + +#include <media/stagefright/foundation/ADebug.h> +#include <media/stagefright/foundation/hexdump.h> + +namespace android { + +static const int32_t kSampleRate = 16000; + +template<class T> +static void InitOMXParams(T *params) { + params->nSize = sizeof(T); + params->nVersion.s.nVersionMajor = 1; + params->nVersion.s.nVersionMinor = 0; + params->nVersion.s.nRevision = 0; + params->nVersion.s.nStep = 0; +} + +SoftAMRWBEncoder::SoftAMRWBEncoder( + const char *name, + const OMX_CALLBACKTYPE *callbacks, + OMX_PTR appData, + OMX_COMPONENTTYPE **component) + : SimpleSoftOMXComponent(name, callbacks, appData, component), + mEncoderHandle(NULL), + mApiHandle(NULL), + mMemOperator(NULL), + mBitRate(0), + mMode(VOAMRWB_MD66), + mInputSize(0), + mInputTimeUs(-1ll), + mSawInputEOS(false), + mSignalledError(false) { + initPorts(); + CHECK_EQ(initEncoder(), (status_t)OK); +} + +SoftAMRWBEncoder::~SoftAMRWBEncoder() { + if (mEncoderHandle != NULL) { + CHECK_EQ(VO_ERR_NONE, mApiHandle->Uninit(mEncoderHandle)); + mEncoderHandle = NULL; + } + + delete mApiHandle; + mApiHandle = NULL; + + delete mMemOperator; + mMemOperator = NULL; +} + +void SoftAMRWBEncoder::initPorts() { + OMX_PARAM_PORTDEFINITIONTYPE def; + 
InitOMXParams(&def); + + def.nPortIndex = 0; + def.eDir = OMX_DirInput; + def.nBufferCountMin = kNumBuffers; + def.nBufferCountActual = def.nBufferCountMin; + def.nBufferSize = kNumSamplesPerFrame * sizeof(int16_t); + def.bEnabled = OMX_TRUE; + def.bPopulated = OMX_FALSE; + def.eDomain = OMX_PortDomainAudio; + def.bBuffersContiguous = OMX_FALSE; + def.nBufferAlignment = 1; + + def.format.audio.cMIMEType = const_cast<char *>("audio/raw"); + def.format.audio.pNativeRender = NULL; + def.format.audio.bFlagErrorConcealment = OMX_FALSE; + def.format.audio.eEncoding = OMX_AUDIO_CodingPCM; + + addPort(def); + + def.nPortIndex = 1; + def.eDir = OMX_DirOutput; + def.nBufferCountMin = kNumBuffers; + def.nBufferCountActual = def.nBufferCountMin; + def.nBufferSize = 8192; + def.bEnabled = OMX_TRUE; + def.bPopulated = OMX_FALSE; + def.eDomain = OMX_PortDomainAudio; + def.bBuffersContiguous = OMX_FALSE; + def.nBufferAlignment = 2; + + def.format.audio.cMIMEType = const_cast<char *>("audio/amr-wb"); + def.format.audio.pNativeRender = NULL; + def.format.audio.bFlagErrorConcealment = OMX_FALSE; + def.format.audio.eEncoding = OMX_AUDIO_CodingAMR; + + addPort(def); +} + +status_t SoftAMRWBEncoder::initEncoder() { + mApiHandle = new VO_AUDIO_CODECAPI; + + if (VO_ERR_NONE != voGetAMRWBEncAPI(mApiHandle)) { + ALOGE("Failed to get api handle"); + return UNKNOWN_ERROR; + } + + mMemOperator = new VO_MEM_OPERATOR; + mMemOperator->Alloc = cmnMemAlloc; + mMemOperator->Copy = cmnMemCopy; + mMemOperator->Free = cmnMemFree; + mMemOperator->Set = cmnMemSet; + mMemOperator->Check = cmnMemCheck; + + VO_CODEC_INIT_USERDATA userData; + memset(&userData, 0, sizeof(userData)); + userData.memflag = VO_IMF_USERMEMOPERATOR; + userData.memData = (VO_PTR) mMemOperator; + + if (VO_ERR_NONE != mApiHandle->Init( + &mEncoderHandle, VO_AUDIO_CodingAMRWB, &userData)) { + ALOGE("Failed to init AMRWB encoder"); + return UNKNOWN_ERROR; + } + + VOAMRWBFRAMETYPE type = VOAMRWB_RFC3267; + if (VO_ERR_NONE != 
mApiHandle->SetParam( + mEncoderHandle, VO_PID_AMRWB_FRAMETYPE, &type)) { + ALOGE("Failed to set AMRWB encoder frame type to %d", type); + return UNKNOWN_ERROR; + } + + return OK; +} + +OMX_ERRORTYPE SoftAMRWBEncoder::internalGetParameter( + OMX_INDEXTYPE index, OMX_PTR params) { + switch (index) { + case OMX_IndexParamAudioPortFormat: + { + OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams = + (OMX_AUDIO_PARAM_PORTFORMATTYPE *)params; + + if (formatParams->nPortIndex > 1) { + return OMX_ErrorUndefined; + } + + if (formatParams->nIndex > 0) { + return OMX_ErrorNoMore; + } + + formatParams->eEncoding = + (formatParams->nPortIndex == 0) + ? OMX_AUDIO_CodingPCM : OMX_AUDIO_CodingAMR; + + return OMX_ErrorNone; + } + + case OMX_IndexParamAudioAmr: + { + OMX_AUDIO_PARAM_AMRTYPE *amrParams = + (OMX_AUDIO_PARAM_AMRTYPE *)params; + + if (amrParams->nPortIndex != 1) { + return OMX_ErrorUndefined; + } + + amrParams->nChannels = 1; + amrParams->nBitRate = mBitRate; + + amrParams->eAMRBandMode = + (OMX_AUDIO_AMRBANDMODETYPE)(mMode + OMX_AUDIO_AMRBandModeWB0); + + amrParams->eAMRDTXMode = OMX_AUDIO_AMRDTXModeOff; + amrParams->eAMRFrameFormat = OMX_AUDIO_AMRFrameFormatFSF; + + return OMX_ErrorNone; + } + + case OMX_IndexParamAudioPcm: + { + OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams = + (OMX_AUDIO_PARAM_PCMMODETYPE *)params; + + if (pcmParams->nPortIndex != 0) { + return OMX_ErrorUndefined; + } + + pcmParams->eNumData = OMX_NumericalDataSigned; + pcmParams->eEndian = OMX_EndianBig; + pcmParams->bInterleaved = OMX_TRUE; + pcmParams->nBitPerSample = 16; + pcmParams->ePCMMode = OMX_AUDIO_PCMModeLinear; + pcmParams->eChannelMapping[0] = OMX_AUDIO_ChannelCF; + + pcmParams->nChannels = 1; + pcmParams->nSamplingRate = kSampleRate; + + return OMX_ErrorNone; + } + + default: + return SimpleSoftOMXComponent::internalGetParameter(index, params); + } +} + +OMX_ERRORTYPE SoftAMRWBEncoder::internalSetParameter( + OMX_INDEXTYPE index, const OMX_PTR params) { + switch (index) { + case 
OMX_IndexParamStandardComponentRole: + { + const OMX_PARAM_COMPONENTROLETYPE *roleParams = + (const OMX_PARAM_COMPONENTROLETYPE *)params; + + if (strncmp((const char *)roleParams->cRole, + "audio_encoder.amrwb", + OMX_MAX_STRINGNAME_SIZE - 1)) { + return OMX_ErrorUndefined; + } + + return OMX_ErrorNone; + } + + case OMX_IndexParamAudioPortFormat: + { + const OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams = + (const OMX_AUDIO_PARAM_PORTFORMATTYPE *)params; + + if (formatParams->nPortIndex > 1) { + return OMX_ErrorUndefined; + } + + if (formatParams->nIndex > 0) { + return OMX_ErrorNoMore; + } + + if ((formatParams->nPortIndex == 0 + && formatParams->eEncoding != OMX_AUDIO_CodingPCM) + || (formatParams->nPortIndex == 1 + && formatParams->eEncoding != OMX_AUDIO_CodingAMR)) { + return OMX_ErrorUndefined; + } + + return OMX_ErrorNone; + } + + case OMX_IndexParamAudioAmr: + { + OMX_AUDIO_PARAM_AMRTYPE *amrParams = + (OMX_AUDIO_PARAM_AMRTYPE *)params; + + if (amrParams->nPortIndex != 1) { + return OMX_ErrorUndefined; + } + + if (amrParams->nChannels != 1 + || amrParams->eAMRDTXMode != OMX_AUDIO_AMRDTXModeOff + || amrParams->eAMRFrameFormat + != OMX_AUDIO_AMRFrameFormatFSF + || amrParams->eAMRBandMode < OMX_AUDIO_AMRBandModeWB0 + || amrParams->eAMRBandMode > OMX_AUDIO_AMRBandModeWB8) { + return OMX_ErrorUndefined; + } + + mBitRate = amrParams->nBitRate; + + mMode = (VOAMRWBMODE)( + amrParams->eAMRBandMode - OMX_AUDIO_AMRBandModeWB0); + + amrParams->eAMRDTXMode = OMX_AUDIO_AMRDTXModeOff; + amrParams->eAMRFrameFormat = OMX_AUDIO_AMRFrameFormatFSF; + + if (VO_ERR_NONE != + mApiHandle->SetParam( + mEncoderHandle, VO_PID_AMRWB_MODE, &mMode)) { + ALOGE("Failed to set AMRWB encoder mode to %d", mMode); + return OMX_ErrorUndefined; + } + + return OMX_ErrorNone; + } + + case OMX_IndexParamAudioPcm: + { + OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams = + (OMX_AUDIO_PARAM_PCMMODETYPE *)params; + + if (pcmParams->nPortIndex != 0) { + return OMX_ErrorUndefined; + } + + if (pcmParams->nChannels 
!= 1 + || pcmParams->nSamplingRate != (OMX_U32)kSampleRate) { + return OMX_ErrorUndefined; + } + + return OMX_ErrorNone; + } + + + default: + return SimpleSoftOMXComponent::internalSetParameter(index, params); + } +} + +void SoftAMRWBEncoder::onQueueFilled(OMX_U32 portIndex) { + if (mSignalledError) { + return; + } + + List<BufferInfo *> &inQueue = getPortQueue(0); + List<BufferInfo *> &outQueue = getPortQueue(1); + + size_t numBytesPerInputFrame = kNumSamplesPerFrame * sizeof(int16_t); + + for (;;) { + // We do the following until we run out of buffers. + + while (mInputSize < numBytesPerInputFrame) { + // As long as there's still input data to be read we + // will drain "kNumSamplesPerFrame" samples + // into the "mInputFrame" buffer and then encode those + // as a unit into an output buffer. + + if (mSawInputEOS || inQueue.empty()) { + return; + } + + BufferInfo *inInfo = *inQueue.begin(); + OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader; + + const void *inData = inHeader->pBuffer + inHeader->nOffset; + + size_t copy = numBytesPerInputFrame - mInputSize; + if (copy > inHeader->nFilledLen) { + copy = inHeader->nFilledLen; + } + + if (mInputSize == 0) { + mInputTimeUs = inHeader->nTimeStamp; + } + + memcpy((uint8_t *)mInputFrame + mInputSize, inData, copy); + mInputSize += copy; + + inHeader->nOffset += copy; + inHeader->nFilledLen -= copy; + + // "Time" on the input buffer has in effect advanced by the + // number of audio frames we just advanced nOffset by. + inHeader->nTimeStamp += + (copy * 1000000ll / kSampleRate) / sizeof(int16_t); + + if (inHeader->nFilledLen == 0) { + if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) { + ALOGV("saw input EOS"); + mSawInputEOS = true; + + // Pad any remaining data with zeroes. 
+ memset((uint8_t *)mInputFrame + mInputSize, + 0, + numBytesPerInputFrame - mInputSize); + + mInputSize = numBytesPerInputFrame; + } + + inQueue.erase(inQueue.begin()); + inInfo->mOwnedByUs = false; + notifyEmptyBufferDone(inHeader); + + inData = NULL; + inHeader = NULL; + inInfo = NULL; + } + } + + // At this point we have all the input data necessary to encode + // a single frame, all we need is an output buffer to store the result + // in. + + if (outQueue.empty()) { + return; + } + + BufferInfo *outInfo = *outQueue.begin(); + OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader; + + uint8_t *outPtr = outHeader->pBuffer + outHeader->nOffset; + size_t outAvailable = outHeader->nAllocLen - outHeader->nOffset; + + VO_CODECBUFFER inputData; + memset(&inputData, 0, sizeof(inputData)); + inputData.Buffer = (unsigned char *) mInputFrame; + inputData.Length = mInputSize; + + CHECK_EQ(VO_ERR_NONE, + mApiHandle->SetInputData(mEncoderHandle, &inputData)); + + VO_CODECBUFFER outputData; + memset(&outputData, 0, sizeof(outputData)); + VO_AUDIO_OUTPUTINFO outputInfo; + memset(&outputInfo, 0, sizeof(outputInfo)); + + outputData.Buffer = outPtr; + outputData.Length = outAvailable; + VO_U32 ret = mApiHandle->GetOutputData( + mEncoderHandle, &outputData, &outputInfo); + CHECK(ret == VO_ERR_NONE || ret == VO_ERR_INPUT_BUFFER_SMALL); + + outHeader->nFilledLen = outputData.Length; + outHeader->nFlags = OMX_BUFFERFLAG_ENDOFFRAME; + + if (mSawInputEOS) { + // We also tag this output buffer with EOS if it corresponds + // to the final input buffer. 
+ outHeader->nFlags = OMX_BUFFERFLAG_EOS; + } + + outHeader->nTimeStamp = mInputTimeUs; + +#if 0 + ALOGI("sending %ld bytes of data (time = %lld us, flags = 0x%08lx)", + outHeader->nFilledLen, mInputTimeUs, outHeader->nFlags); + + hexdump(outHeader->pBuffer + outHeader->nOffset, outHeader->nFilledLen); +#endif + + outQueue.erase(outQueue.begin()); + outInfo->mOwnedByUs = false; + notifyFillBufferDone(outHeader); + + outHeader = NULL; + outInfo = NULL; + + mInputSize = 0; + } +} + +} // namespace android + +android::SoftOMXComponent *createSoftOMXComponent( + const char *name, const OMX_CALLBACKTYPE *callbacks, + OMX_PTR appData, OMX_COMPONENTTYPE **component) { + return new android::SoftAMRWBEncoder(name, callbacks, appData, component); +} diff --git a/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.h b/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.h new file mode 100644 index 0000000..d0c1dab --- /dev/null +++ b/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.h @@ -0,0 +1,76 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef SOFT_AMRWB_ENCODER_H_ + +#define SOFT_AMRWB_ENCODER_H_ + +#include "SimpleSoftOMXComponent.h" + +#include "voAMRWB.h" + +struct VO_AUDIO_CODECAPI; +struct VO_MEM_OPERATOR; + +namespace android { + +struct SoftAMRWBEncoder : public SimpleSoftOMXComponent { + SoftAMRWBEncoder( + const char *name, + const OMX_CALLBACKTYPE *callbacks, + OMX_PTR appData, + OMX_COMPONENTTYPE **component); + +protected: + virtual ~SoftAMRWBEncoder(); + + virtual OMX_ERRORTYPE internalGetParameter( + OMX_INDEXTYPE index, OMX_PTR params); + + virtual OMX_ERRORTYPE internalSetParameter( + OMX_INDEXTYPE index, const OMX_PTR params); + + virtual void onQueueFilled(OMX_U32 portIndex); + +private: + enum { + kNumBuffers = 4, + kNumSamplesPerFrame = 320, + }; + + void *mEncoderHandle; + VO_AUDIO_CODECAPI *mApiHandle; + VO_MEM_OPERATOR *mMemOperator; + + OMX_U32 mBitRate; + VOAMRWBMODE mMode; + + size_t mInputSize; + int16_t mInputFrame[kNumSamplesPerFrame]; + int64_t mInputTimeUs; + + bool mSawInputEOS; + bool mSignalledError; + + void initPorts(); + status_t initEncoder(); + + DISALLOW_EVIL_CONSTRUCTORS(SoftAMRWBEncoder); +}; + +} // namespace android + +#endif // SOFT_AMRWB_ENCODER_H_ diff --git a/media/libstagefright/codecs/avc/enc/AVCEncoder.cpp b/media/libstagefright/codecs/avc/enc/AVCEncoder.cpp index e202a2b..7533f07 100644 --- a/media/libstagefright/codecs/avc/enc/AVCEncoder.cpp +++ b/media/libstagefright/codecs/avc/enc/AVCEncoder.cpp @@ -24,8 +24,8 @@ #include "avcenc_int.h" #include "OMX_Video.h" +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/MediaBufferGroup.h> -#include <media/stagefright/MediaDebug.h> #include <media/stagefright/MediaDefs.h> #include <media/stagefright/MediaErrors.h> #include <media/stagefright/MetaData.h> @@ -417,7 +417,7 @@ status_t AVCEncoder::read( *out = NULL; MediaBuffer *outputBuffer; - CHECK_EQ(OK, mGroup->acquire_buffer(&outputBuffer)); + CHECK_EQ((status_t)OK, mGroup->acquire_buffer(&outputBuffer)); uint8_t 
*outPtr = (uint8_t *) outputBuffer->data(); uint32_t dataLength = outputBuffer->size(); @@ -557,9 +557,9 @@ status_t AVCEncoder::read( encoderStatus = PVAVCEncodeNAL(mHandle, outPtr, &dataLength, &type); if (encoderStatus == AVCENC_SUCCESS) { outputBuffer->meta_data()->setInt32(kKeyIsSyncFrame, mIsIDRFrame); - CHECK_EQ(NULL, PVAVCEncGetOverrunBuffer(mHandle)); + CHECK(NULL == PVAVCEncGetOverrunBuffer(mHandle)); } else if (encoderStatus == AVCENC_PICTURE_READY) { - CHECK_EQ(NULL, PVAVCEncGetOverrunBuffer(mHandle)); + CHECK(NULL == PVAVCEncGetOverrunBuffer(mHandle)); if (mIsIDRFrame) { outputBuffer->meta_data()->setInt32(kKeyIsSyncFrame, mIsIDRFrame); mIsIDRFrame = 0; diff --git a/media/libstagefright/codecs/m4v_h263/enc/M4vH263Encoder.cpp b/media/libstagefright/codecs/m4v_h263/enc/M4vH263Encoder.cpp index d538603..20b0f8d 100644 --- a/media/libstagefright/codecs/m4v_h263/enc/M4vH263Encoder.cpp +++ b/media/libstagefright/codecs/m4v_h263/enc/M4vH263Encoder.cpp @@ -23,8 +23,8 @@ #include "mp4enc_api.h" #include "OMX_Video.h" +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/MediaBufferGroup.h> -#include <media/stagefright/MediaDebug.h> #include <media/stagefright/MediaDefs.h> #include <media/stagefright/MediaErrors.h> #include <media/stagefright/MetaData.h> @@ -379,7 +379,7 @@ status_t M4vH263Encoder::read( *out = NULL; MediaBuffer *outputBuffer; - CHECK_EQ(OK, mGroup->acquire_buffer(&outputBuffer)); + CHECK_EQ((status_t)OK, mGroup->acquire_buffer(&outputBuffer)); uint8_t *outPtr = (uint8_t *) outputBuffer->data(); int32_t dataLength = outputBuffer->size(); @@ -467,7 +467,7 @@ status_t M4vH263Encoder::read( mInputBuffer = NULL; return UNKNOWN_ERROR; } - CHECK_EQ(NULL, PVGetOverrunBuffer(mHandle)); + CHECK(NULL == PVGetOverrunBuffer(mHandle)); if (hintTrack.CodeType == 0) { // I-frame serves as sync frame outputBuffer->meta_data()->setInt32(kKeyIsSyncFrame, 1); } diff --git a/media/libstagefright/colorconversion/ColorConverter.cpp 
b/media/libstagefright/colorconversion/ColorConverter.cpp index 5cc3f78..597167f 100644 --- a/media/libstagefright/colorconversion/ColorConverter.cpp +++ b/media/libstagefright/colorconversion/ColorConverter.cpp @@ -18,8 +18,8 @@ #define LOG_TAG "ColorConverter" #include <utils/Log.h> +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/ColorConverter.h> -#include <media/stagefright/MediaDebug.h> #include <media/stagefright/MediaErrors.h> namespace android { @@ -144,8 +144,8 @@ status_t ColorConverter::convertCbYCrY( return ERROR_UNSUPPORTED; } - uint32_t *dst_ptr = (uint32_t *)dst.mBits - + (dst.mCropTop * dst.mWidth + dst.mCropLeft) / 2; + uint16_t *dst_ptr = (uint16_t *)dst.mBits + + dst.mCropTop * dst.mWidth + dst.mCropLeft; const uint8_t *src_ptr = (const uint8_t *)src.mBits + (src.mCropTop * dst.mWidth + src.mCropLeft) * 2; @@ -182,11 +182,15 @@ status_t ColorConverter::convertCbYCrY( | ((kAdjustedClip[g2] >> 2) << 5) | (kAdjustedClip[b2] >> 3); - dst_ptr[x / 2] = (rgb2 << 16) | rgb1; + if (x + 1 < src.cropWidth()) { + *(uint32_t *)(&dst_ptr[x]) = (rgb2 << 16) | rgb1; + } else { + dst_ptr[x] = rgb1; + } } src_ptr += src.mWidth * 2; - dst_ptr += dst.mWidth / 2; + dst_ptr += dst.mWidth; } return OK; @@ -290,15 +294,14 @@ status_t ColorConverter::convertQCOMYUV420SemiPlanar( const BitmapParams &src, const BitmapParams &dst) { uint8_t *kAdjustedClip = initClip(); - if (!((dst.mWidth & 3) == 0 - && (src.mCropLeft & 1) == 0 + if (!((src.mCropLeft & 1) == 0 && src.cropWidth() == dst.cropWidth() && src.cropHeight() == dst.cropHeight())) { return ERROR_UNSUPPORTED; } - uint32_t *dst_ptr = (uint32_t *)dst.mBits - + (dst.mCropTop * dst.mWidth + dst.mCropLeft) / 2; + uint16_t *dst_ptr = (uint16_t *)dst.mBits + + dst.mCropTop * dst.mWidth + dst.mCropLeft; const uint8_t *src_y = (const uint8_t *)src.mBits + src.mCropTop * src.mWidth + src.mCropLeft; @@ -340,7 +343,11 @@ status_t ColorConverter::convertQCOMYUV420SemiPlanar( | ((kAdjustedClip[g2] >> 
2) << 5) | (kAdjustedClip[r2] >> 3); - dst_ptr[x / 2] = (rgb2 << 16) | rgb1; + if (x + 1 < src.cropWidth()) { + *(uint32_t *)(&dst_ptr[x]) = (rgb2 << 16) | rgb1; + } else { + dst_ptr[x] = rgb1; + } } src_y += src.mWidth; @@ -349,7 +356,7 @@ status_t ColorConverter::convertQCOMYUV420SemiPlanar( src_u += src.mWidth; } - dst_ptr += dst.mWidth / 2; + dst_ptr += dst.mWidth; } return OK; @@ -361,15 +368,14 @@ status_t ColorConverter::convertYUV420SemiPlanar( uint8_t *kAdjustedClip = initClip(); - if (!((dst.mWidth & 3) == 0 - && (src.mCropLeft & 1) == 0 + if (!((src.mCropLeft & 1) == 0 && src.cropWidth() == dst.cropWidth() && src.cropHeight() == dst.cropHeight())) { return ERROR_UNSUPPORTED; } - uint32_t *dst_ptr = (uint32_t *)dst.mBits - + (dst.mCropTop * dst.mWidth + dst.mCropLeft) / 2; + uint16_t *dst_ptr = (uint16_t *)dst.mBits + + dst.mCropTop * dst.mWidth + dst.mCropLeft; const uint8_t *src_y = (const uint8_t *)src.mBits + src.mCropTop * src.mWidth + src.mCropLeft; @@ -411,7 +417,11 @@ status_t ColorConverter::convertYUV420SemiPlanar( | ((kAdjustedClip[g2] >> 2) << 5) | (kAdjustedClip[r2] >> 3); - dst_ptr[x / 2] = (rgb2 << 16) | rgb1; + if (x + 1 < src.cropWidth()) { + *(uint32_t *)(&dst_ptr[x]) = (rgb2 << 16) | rgb1; + } else { + dst_ptr[x] = rgb1; + } } src_y += src.mWidth; @@ -420,7 +430,7 @@ status_t ColorConverter::convertYUV420SemiPlanar( src_u += src.mWidth; } - dst_ptr += dst.mWidth / 2; + dst_ptr += dst.mWidth; } return OK; @@ -430,15 +440,14 @@ status_t ColorConverter::convertTIYUV420PackedSemiPlanar( const BitmapParams &src, const BitmapParams &dst) { uint8_t *kAdjustedClip = initClip(); - if (!((dst.mWidth & 3) == 0 - && (src.mCropLeft & 1) == 0 + if (!((src.mCropLeft & 1) == 0 && src.cropWidth() == dst.cropWidth() && src.cropHeight() == dst.cropHeight())) { return ERROR_UNSUPPORTED; } - uint32_t *dst_ptr = (uint32_t *)dst.mBits - + (dst.mCropTop * dst.mWidth + dst.mCropLeft) / 2; + uint16_t *dst_ptr = (uint16_t *)dst.mBits + + dst.mCropTop * dst.mWidth 
+ dst.mCropLeft; const uint8_t *src_y = (const uint8_t *)src.mBits; @@ -478,7 +487,11 @@ status_t ColorConverter::convertTIYUV420PackedSemiPlanar( | ((kAdjustedClip[g2] >> 2) << 5) | (kAdjustedClip[b2] >> 3); - dst_ptr[x / 2] = (rgb2 << 16) | rgb1; + if (x + 1 < src.cropWidth()) { + *(uint32_t *)(&dst_ptr[x]) = (rgb2 << 16) | rgb1; + } else { + dst_ptr[x] = rgb1; + } } src_y += src.mWidth; @@ -487,7 +500,7 @@ status_t ColorConverter::convertTIYUV420PackedSemiPlanar( src_u += src.mWidth; } - dst_ptr += dst.mWidth / 2; + dst_ptr += dst.mWidth; } return OK; diff --git a/media/libstagefright/colorconversion/SoftwareRenderer.cpp b/media/libstagefright/colorconversion/SoftwareRenderer.cpp index e892f92..059d6b9 100644 --- a/media/libstagefright/colorconversion/SoftwareRenderer.cpp +++ b/media/libstagefright/colorconversion/SoftwareRenderer.cpp @@ -19,12 +19,9 @@ #include "../include/SoftwareRenderer.h" -#include <binder/MemoryHeapBase.h> -#include <binder/MemoryHeapPmem.h> #include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/MetaData.h> -#include <surfaceflinger/Surface.h> -#include <ui/android_native_buffer.h> +#include <system/window.h> #include <ui/GraphicBufferMapper.h> #include <gui/ISurfaceTexture.h> diff --git a/media/libstagefright/foundation/AMessage.cpp b/media/libstagefright/foundation/AMessage.cpp index 0a6776e..8b01ac6 100644 --- a/media/libstagefright/foundation/AMessage.cpp +++ b/media/libstagefright/foundation/AMessage.cpp @@ -19,6 +19,7 @@ #include <ctype.h> #include "AAtomizer.h" +#include "ABuffer.h" #include "ADebug.h" #include "ALooperRoster.h" #include "AString.h" @@ -73,6 +74,7 @@ void AMessage::freeItem(Item *item) { case kTypeObject: case kTypeMessage: + case kTypeBuffer: { if (item->u.refValue != NULL) { item->u.refValue->decStrong(this); @@ -157,14 +159,23 @@ void AMessage::setString( item->u.stringValue = new AString(s, len < 0 ? 
strlen(s) : len); } -void AMessage::setObject(const char *name, const sp<RefBase> &obj) { +void AMessage::setObjectInternal( + const char *name, const sp<RefBase> &obj, Type type) { Item *item = allocateItem(name); - item->mType = kTypeObject; + item->mType = type; if (obj != NULL) { obj->incStrong(this); } item->u.refValue = obj.get(); } +void AMessage::setObject(const char *name, const sp<RefBase> &obj) { + setObjectInternal(name, obj, kTypeObject); +} + +void AMessage::setBuffer(const char *name, const sp<ABuffer> &buffer) { + setObjectInternal(name, sp<RefBase>(buffer), kTypeBuffer); +} + void AMessage::setMessage(const char *name, const sp<AMessage> &obj) { Item *item = allocateItem(name); item->mType = kTypeMessage; @@ -203,6 +214,15 @@ bool AMessage::findObject(const char *name, sp<RefBase> *obj) const { return false; } +bool AMessage::findBuffer(const char *name, sp<ABuffer> *buf) const { + const Item *item = findItem(name, kTypeBuffer); + if (item) { + *buf = (ABuffer *)(item->u.refValue); + return true; + } + return false; +} + bool AMessage::findMessage(const char *name, sp<AMessage> *obj) const { const Item *item = findItem(name, kTypeMessage); if (item) { @@ -273,6 +293,7 @@ sp<AMessage> AMessage::dup() const { } case kTypeObject: + case kTypeBuffer: { to->u.refValue = from->u.refValue; to->u.refValue->incStrong(msg.get()); @@ -377,6 +398,10 @@ AString AMessage::debugString(int32_t indent) const { tmp = StringPrintf( "RefBase *%s = %p", item.mName, item.u.refValue); break; + case kTypeBuffer: + tmp = StringPrintf( + "ABuffer *%s = %p", item.mName, item.u.refValue); + break; case kTypeMessage: tmp = StringPrintf( "AMessage %s = %s", @@ -542,4 +567,20 @@ void AMessage::writeToParcel(Parcel *parcel) const { } } +size_t AMessage::countEntries() const { + return mNumItems; +} + +const char *AMessage::getEntryNameAt(size_t index, Type *type) const { + if (index >= mNumItems) { + *type = kTypeInt32; + + return NULL; + } + + *type = mItems[index].mType; + + 
return mItems[index].mName; +} + } // namespace android diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp index 0df66f1..0cddd2e 100644 --- a/media/libstagefright/httplive/LiveSession.cpp +++ b/media/libstagefright/httplive/LiveSession.cpp @@ -215,7 +215,9 @@ void LiveSession::onDisconnect() { mDisconnectPending = false; } -status_t LiveSession::fetchFile(const char *url, sp<ABuffer> *out) { +status_t LiveSession::fetchFile( + const char *url, sp<ABuffer> *out, + int64_t range_offset, int64_t range_length) { *out = NULL; sp<DataSource> source; @@ -234,8 +236,18 @@ status_t LiveSession::fetchFile(const char *url, sp<ABuffer> *out) { } } - status_t err = mHTTPDataSource->connect( - url, mExtraHeaders.isEmpty() ? NULL : &mExtraHeaders); + KeyedVector<String8, String8> headers = mExtraHeaders; + if (range_offset > 0 || range_length >= 0) { + headers.add( + String8("Range"), + String8( + StringPrintf( + "bytes=%lld-%s", + range_offset, + range_length < 0 + ? 
"" : StringPrintf("%lld", range_offset + range_length - 1).c_str()).c_str())); + } + status_t err = mHTTPDataSource->connect(url, &headers); if (err != OK) { return err; @@ -270,9 +282,21 @@ status_t LiveSession::fetchFile(const char *url, sp<ABuffer> *out) { buffer = copy; } + size_t maxBytesToRead = bufferRemaining; + if (range_length >= 0) { + int64_t bytesLeftInRange = range_length - buffer->size(); + if (bytesLeftInRange < maxBytesToRead) { + maxBytesToRead = bytesLeftInRange; + + if (bytesLeftInRange == 0) { + break; + } + } + } + ssize_t n = source->readAt( buffer->size(), buffer->data() + buffer->size(), - bufferRemaining); + maxBytesToRead); if (n < 0) { return n; @@ -659,8 +683,15 @@ rinse_repeat: explicitDiscontinuity = true; } + int64_t range_offset, range_length; + if (!itemMeta->findInt64("range-offset", &range_offset) + || !itemMeta->findInt64("range-length", &range_length)) { + range_offset = 0; + range_length = -1; + } + sp<ABuffer> buffer; - status_t err = fetchFile(uri.c_str(), &buffer); + status_t err = fetchFile(uri.c_str(), &buffer, range_offset, range_length); if (err != OK) { ALOGE("failed to fetch .ts segment at url '%s'", uri.c_str()); mDataSource->queueEOS(err); diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp index 5e30488..7d3cf05 100644 --- a/media/libstagefright/httplive/M3UParser.cpp +++ b/media/libstagefright/httplive/M3UParser.cpp @@ -152,6 +152,7 @@ status_t M3UParser::parse(const void *_data, size_t size) { const char *data = (const char *)_data; size_t offset = 0; + uint64_t segmentRangeOffset = 0; while (offset < size) { size_t offsetLF = offset; while (offsetLF < size && data[offsetLF] != '\n') { @@ -218,6 +219,24 @@ status_t M3UParser::parse(const void *_data, size_t size) { } mIsVariantPlaylist = true; err = parseStreamInf(line, &itemMeta); + } else if (line.startsWith("#EXT-X-BYTERANGE")) { + if (mIsVariantPlaylist) { + return ERROR_MALFORMED; + } + + uint64_t length, 
offset; + err = parseByteRange(line, segmentRangeOffset, &length, &offset); + + if (err == OK) { + if (itemMeta == NULL) { + itemMeta = new AMessage; + } + + itemMeta->setInt64("range-offset", offset); + itemMeta->setInt64("range-length", length); + + segmentRangeOffset = offset + length; + } } if (err != OK) { @@ -447,6 +466,52 @@ status_t M3UParser::parseCipherInfo( } // static +status_t M3UParser::parseByteRange( + const AString &line, uint64_t curOffset, + uint64_t *length, uint64_t *offset) { + ssize_t colonPos = line.find(":"); + + if (colonPos < 0) { + return ERROR_MALFORMED; + } + + ssize_t atPos = line.find("@", colonPos + 1); + + AString lenStr; + if (atPos < 0) { + lenStr = AString(line, colonPos + 1, line.size() - colonPos - 1); + } else { + lenStr = AString(line, colonPos + 1, atPos - colonPos - 1); + } + + lenStr.trim(); + + const char *s = lenStr.c_str(); + char *end; + *length = strtoull(s, &end, 10); + + if (s == end || *end != '\0') { + return ERROR_MALFORMED; + } + + if (atPos >= 0) { + AString offStr = AString(line, atPos + 1, line.size() - atPos - 1); + offStr.trim(); + + const char *s = offStr.c_str(); + *offset = strtoull(s, &end, 10); + + if (s == end || *end != '\0') { + return ERROR_MALFORMED; + } + } else { + *offset = curOffset; + } + + return OK; +} + +// static status_t M3UParser::ParseInt32(const char *s, int32_t *x) { char *end; long lval = strtol(s, &end, 10); diff --git a/media/libstagefright/id3/Android.mk b/media/libstagefright/id3/Android.mk index 23c8e44..ff35d4a 100644 --- a/media/libstagefright/id3/Android.mk +++ b/media/libstagefright/id3/Android.mk @@ -16,7 +16,7 @@ LOCAL_SRC_FILES := \ testid3.cpp LOCAL_SHARED_LIBRARIES := \ - libstagefright libutils libbinder + libstagefright libutils libbinder libstagefright_foundation LOCAL_STATIC_LIBRARIES := \ libstagefright_id3 diff --git a/media/libstagefright/id3/ID3.cpp b/media/libstagefright/id3/ID3.cpp index 6dde9d8..2e92926 100644 --- a/media/libstagefright/id3/ID3.cpp +++ 
b/media/libstagefright/id3/ID3.cpp @@ -20,8 +20,8 @@ #include "../include/ID3.h" +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/DataSource.h> -#include <media/stagefright/MediaDebug.h> #include <media/stagefright/Utils.h> #include <utils/String8.h> #include <byteswap.h> diff --git a/media/libstagefright/id3/testid3.cpp b/media/libstagefright/id3/testid3.cpp index 0741045..bc4572c 100644 --- a/media/libstagefright/id3/testid3.cpp +++ b/media/libstagefright/id3/testid3.cpp @@ -23,7 +23,7 @@ #include <binder/ProcessState.h> #include <media/stagefright/FileSource.h> -#include <media/stagefright/MediaDebug.h> +#include <media/stagefright/foundation/ADebug.h> #define MAXPATHLEN 256 @@ -70,7 +70,7 @@ static void hexdump(const void *_data, size_t size) { void scanFile(const char *path) { sp<FileSource> file = new FileSource(path); - CHECK_EQ(file->initCheck(), OK); + CHECK_EQ(file->initCheck(), (status_t)OK); ID3 tag(file); if (!tag.isValid()) { diff --git a/media/libstagefright/include/AACExtractor.h b/media/libstagefright/include/AACExtractor.h index 8e5657b..e98ca82 100644 --- a/media/libstagefright/include/AACExtractor.h +++ b/media/libstagefright/include/AACExtractor.h @@ -29,7 +29,7 @@ class String8; class AACExtractor : public MediaExtractor { public: - AACExtractor(const sp<DataSource> &source); + AACExtractor(const sp<DataSource> &source, const sp<AMessage> &meta); virtual size_t countTracks(); virtual sp<MediaSource> getTrack(size_t index); diff --git a/media/libstagefright/include/AwesomePlayer.h b/media/libstagefright/include/AwesomePlayer.h index 0985f47..4c7bfa6 100644 --- a/media/libstagefright/include/AwesomePlayer.h +++ b/media/libstagefright/include/AwesomePlayer.h @@ -41,7 +41,7 @@ struct ISurfaceTexture; class DrmManagerClinet; class DecryptHandle; -class TimedTextPlayer; +class TimedTextDriver; struct WVMExtractor; struct AwesomeRenderer : public RefBase { @@ -232,7 +232,7 @@ private: sp<DecryptHandle> mDecryptHandle; 
int64_t mLastVideoTimeUs; - TimedTextPlayer *mTextPlayer; + TimedTextDriver *mTextDriver; mutable Mutex mTimedTextLock; sp<WVMExtractor> mWVMExtractor; @@ -258,7 +258,7 @@ private: void setVideoSource(sp<MediaSource> source); status_t initVideoDecoder(uint32_t flags = 0); - void addTextSource(sp<MediaSource> source); + void addTextSource(const sp<MediaSource>& source); void onStreamDone(); @@ -290,6 +290,7 @@ private: bool isStreamingHTTP() const; void sendCacheStats(); + void checkDrmStatus(const sp<DataSource>& dataSource); enum FlagMode { SET, @@ -325,4 +326,3 @@ private: } // namespace android #endif // AWESOME_PLAYER_H_ - diff --git a/media/libstagefright/include/ChromiumHTTPDataSource.h b/media/libstagefright/include/ChromiumHTTPDataSource.h index 18f8913..82e08fd 100644 --- a/media/libstagefright/include/ChromiumHTTPDataSource.h +++ b/media/libstagefright/include/ChromiumHTTPDataSource.h @@ -43,7 +43,7 @@ struct ChromiumHTTPDataSource : public HTTPBase { virtual status_t getSize(off64_t *size); virtual uint32_t flags(); - virtual sp<DecryptHandle> DrmInitialization(); + virtual sp<DecryptHandle> DrmInitialization(const char *mime); virtual void getDrmInfo(sp<DecryptHandle> &handle, DrmManagerClient **client); diff --git a/media/libstagefright/include/DataUriSource.h b/media/libstagefright/include/DataUriSource.h new file mode 100644 index 0000000..d223c06 --- /dev/null +++ b/media/libstagefright/include/DataUriSource.h @@ -0,0 +1,76 @@ +/* + * Copyright (C) 2011 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef DATA_URI_SOURCE_H_ + +#define DATA_URI_SOURCE_H_ + +#include <stdio.h> + +#include <media/stagefright/DataSource.h> +#include <media/stagefright/MediaErrors.h> +#include <media/stagefright/foundation/AString.h> + +namespace android { + +class DataUriSource : public DataSource { +public: + DataUriSource(const char *uri); + + virtual status_t initCheck() const { + return mInited; + } + + virtual ssize_t readAt(off64_t offset, void *data, size_t size); + + virtual status_t getSize(off64_t *size) { + if (mInited != OK) { + return mInited; + } + + *size = mData.size(); + return OK; + } + + virtual String8 getUri() { + return mDataUri; + } + + virtual String8 getMIMEType() const { + return mMimeType; + } + +protected: + virtual ~DataUriSource() { + // Nothing to delete. + } + +private: + const String8 mDataUri; + + String8 mMimeType; + // Use AString because individual bytes may not be valid UTF8 chars. + AString mData; + status_t mInited; + + // Disallow copy and assign. 
+ DataUriSource(const DataUriSource &); + DataUriSource &operator=(const DataUriSource &); +}; + +} // namespace android + +#endif // DATA_URI_SOURCE_H_ diff --git a/media/libstagefright/include/LiveSession.h b/media/libstagefright/include/LiveSession.h index 116ed0e..3a11612 100644 --- a/media/libstagefright/include/LiveSession.h +++ b/media/libstagefright/include/LiveSession.h @@ -120,7 +120,10 @@ private: void onMonitorQueue(); void onSeek(const sp<AMessage> &msg); - status_t fetchFile(const char *url, sp<ABuffer> *out); + status_t fetchFile( + const char *url, sp<ABuffer> *out, + int64_t range_offset = 0, int64_t range_length = -1); + sp<M3UParser> fetchPlaylist(const char *url, bool *unchanged); size_t getBandwidthIndex(); diff --git a/media/libstagefright/include/M3UParser.h b/media/libstagefright/include/M3UParser.h index 478582d..e30d6fd 100644 --- a/media/libstagefright/include/M3UParser.h +++ b/media/libstagefright/include/M3UParser.h @@ -72,6 +72,10 @@ private: static status_t parseCipherInfo( const AString &line, sp<AMessage> *meta, const AString &baseURI); + static status_t parseByteRange( + const AString &line, uint64_t curOffset, + uint64_t *length, uint64_t *offset); + static status_t ParseInt32(const char *s, int32_t *x); static status_t ParseDouble(const char *s, double *x); diff --git a/media/libstagefright/include/NuCachedSource2.h b/media/libstagefright/include/NuCachedSource2.h index 7a03e7e..c27a29b 100644 --- a/media/libstagefright/include/NuCachedSource2.h +++ b/media/libstagefright/include/NuCachedSource2.h @@ -40,7 +40,7 @@ struct NuCachedSource2 : public DataSource { virtual status_t getSize(off64_t *size); virtual uint32_t flags(); - virtual sp<DecryptHandle> DrmInitialization(); + virtual sp<DecryptHandle> DrmInitialization(const char* mime); virtual void getDrmInfo(sp<DecryptHandle> &handle, DrmManagerClient **client); virtual String8 getUri(); diff --git a/media/libstagefright/include/OMX.h b/media/libstagefright/include/OMX.h index 
53e764f..2c87b34 100644 --- a/media/libstagefright/include/OMX.h +++ b/media/libstagefright/include/OMX.h @@ -31,7 +31,7 @@ class OMX : public BnOMX, public: OMX(); - virtual bool livesLocally(pid_t pid); + virtual bool livesLocally(node_id node, pid_t pid); virtual status_t listNodes(List<ComponentInfo> *list); diff --git a/media/libstagefright/include/SoftwareRenderer.h b/media/libstagefright/include/SoftwareRenderer.h index 8f2ea95..7ab0042 100644 --- a/media/libstagefright/include/SoftwareRenderer.h +++ b/media/libstagefright/include/SoftwareRenderer.h @@ -20,7 +20,7 @@ #include <media/stagefright/ColorConverter.h> #include <utils/RefBase.h> -#include <ui/android_native_buffer.h> +#include <system/window.h> namespace android { diff --git a/media/libstagefright/include/ThrottledSource.h b/media/libstagefright/include/ThrottledSource.h index 8928a4a..7fe7c06 100644 --- a/media/libstagefright/include/ThrottledSource.h +++ b/media/libstagefright/include/ThrottledSource.h @@ -35,6 +35,11 @@ struct ThrottledSource : public DataSource { virtual status_t getSize(off64_t *size); virtual uint32_t flags(); + virtual String8 getMIMEType() const { + return mSource->getMIMEType(); + } + + private: Mutex mLock; diff --git a/media/libstagefright/include/WVMExtractor.h b/media/libstagefright/include/WVMExtractor.h index deecd25..3c3ca89 100644 --- a/media/libstagefright/include/WVMExtractor.h +++ b/media/libstagefright/include/WVMExtractor.h @@ -23,6 +23,8 @@ namespace android { +struct AMessage; +class String8; class DataSource; class WVMLoadableExtractor : public MediaExtractor { @@ -32,6 +34,7 @@ public: virtual int64_t getCachedDurationUs(status_t *finalStatus) = 0; virtual void setAdaptiveStreamingMode(bool adaptive) = 0; + virtual void setUID(uid_t uid) = 0; }; class WVMExtractor : public MediaExtractor { @@ -58,6 +61,10 @@ public: // is used. 
void setAdaptiveStreamingMode(bool adaptive); + void setUID(uid_t uid); + + static bool getVendorLibHandle(); + protected: virtual ~WVMExtractor(); @@ -69,6 +76,10 @@ private: WVMExtractor &operator=(const WVMExtractor &); }; +bool SniffWVM( + const sp<DataSource> &source, String8 *mimeType, float *confidence, + sp<AMessage> *); + } // namespace android #endif // DRM_EXTRACTOR_H_ diff --git a/media/libstagefright/matroska/MatroskaExtractor.cpp b/media/libstagefright/matroska/MatroskaExtractor.cpp index 4fbf47e..a0db719 100644 --- a/media/libstagefright/matroska/MatroskaExtractor.cpp +++ b/media/libstagefright/matroska/MatroskaExtractor.cpp @@ -93,7 +93,10 @@ struct BlockIterator { void advance(); void reset(); - void seek(int64_t seekTimeUs, bool seekToKeyFrame); + + void seek( + int64_t seekTimeUs, bool seekToKeyFrame, + int64_t *actualFrameTimeUs); const mkvparser::Block *block() const; int64_t blockTimeUs() const; @@ -303,23 +306,53 @@ void BlockIterator::reset() { } while (!eos() && block()->GetTrackNumber() != mTrackNum); } -void BlockIterator::seek(int64_t seekTimeUs, bool seekToKeyFrame) { +void BlockIterator::seek( + int64_t seekTimeUs, bool seekToKeyFrame, + int64_t *actualFrameTimeUs) { Mutex::Autolock autoLock(mExtractor->mLock); - mCluster = mExtractor->mSegment->FindCluster(seekTimeUs * 1000ll); + *actualFrameTimeUs = -1ll; + + int64_t seekTimeNs = seekTimeUs * 1000ll; + + mCluster = mExtractor->mSegment->FindCluster(seekTimeNs); mBlockEntry = NULL; mBlockEntryIndex = 0; - do { + long prevKeyFrameBlockEntryIndex = -1; + + for (;;) { advance_l(); - } - while (!eos() && block()->GetTrackNumber() != mTrackNum); - if (seekToKeyFrame) { - while (!eos() && !mBlockEntry->GetBlock()->IsKey()) { - advance_l(); + if (eos()) { + break; + } + + if (block()->GetTrackNumber() != mTrackNum) { + continue; + } + + if (block()->IsKey()) { + prevKeyFrameBlockEntryIndex = mBlockEntryIndex - 1; + } + + int64_t timeNs = block()->GetTime(mCluster); + + if (timeNs >= 
seekTimeNs) { + *actualFrameTimeUs = (timeNs + 500ll) / 1000ll; + break; } } + + if (eos()) { + return; + } + + if (seekToKeyFrame && !block()->IsKey()) { + CHECK_GE(prevKeyFrameBlockEntryIndex, 0); + mBlockEntryIndex = prevKeyFrameBlockEntryIndex; + advance_l(); + } } const mkvparser::Block *BlockIterator::block() const { @@ -397,6 +430,8 @@ status_t MatroskaSource::read( MediaBuffer **out, const ReadOptions *options) { *out = NULL; + int64_t targetSampleTimeUs = -1ll; + int64_t seekTimeUs; ReadOptions::SeekMode mode; if (options && options->getSeekTo(&seekTimeUs, &mode) @@ -406,10 +441,14 @@ status_t MatroskaSource::read( // Apparently keyframe indication in audio tracks is unreliable, // fortunately in all our currently supported audio encodings every // frame is effectively a keyframe. - mBlockIter.seek(seekTimeUs, !mIsAudio); + int64_t actualFrameTimeUs; + mBlockIter.seek(seekTimeUs, !mIsAudio, &actualFrameTimeUs); + + if (mode == ReadOptions::SEEK_CLOSEST) { + targetSampleTimeUs = actualFrameTimeUs; + } } -again: while (mPendingFrames.empty()) { status_t err = readBlock(); @@ -424,6 +463,11 @@ again: mPendingFrames.erase(mPendingFrames.begin()); if (mType != AVC) { + if (targetSampleTimeUs >= 0ll) { + frame->meta_data()->setInt64( + kKeyTargetTime, targetSampleTimeUs); + } + *out = frame; return OK; @@ -506,6 +550,11 @@ again: frame->release(); frame = NULL; + if (targetSampleTimeUs >= 0ll) { + buffer->meta_data()->setInt64( + kKeyTargetTime, targetSampleTimeUs); + } + *out = buffer; return OK; @@ -610,36 +659,41 @@ bool MatroskaExtractor::isLiveStreaming() const { return mIsLiveStreaming; } -static void addESDSFromAudioSpecificInfo( - const sp<MetaData> &meta, const void *asi, size_t asiSize) { +static void addESDSFromCodecPrivate( + const sp<MetaData> &meta, + bool isAudio, const void *priv, size_t privSize) { static const uint8_t kStaticESDS[] = { 0x03, 22, 0x00, 0x00, // ES_ID 0x00, // streamDependenceFlag, URL_Flag, OCRstreamFlag 0x04, 17, - 0x40, // 
Audio ISO/IEC 14496-3 + 0x40, // ObjectTypeIndication 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, - // AudioSpecificInfo (with size prefix) follows + // CodecSpecificInfo (with size prefix) follows }; // Make sure all sizes can be coded in a single byte. - CHECK(asiSize + 22 - 2 < 128); - size_t esdsSize = sizeof(kStaticESDS) + asiSize + 1; + CHECK(privSize + 22 - 2 < 128); + size_t esdsSize = sizeof(kStaticESDS) + privSize + 1; uint8_t *esds = new uint8_t[esdsSize]; memcpy(esds, kStaticESDS, sizeof(kStaticESDS)); uint8_t *ptr = esds + sizeof(kStaticESDS); - *ptr++ = asiSize; - memcpy(ptr, asi, asiSize); + *ptr++ = privSize; + memcpy(ptr, priv, privSize); // Increment by codecPrivateSize less 2 bytes that are accounted for // already in lengths of 22/17 - esds[1] += asiSize - 2; - esds[6] += asiSize - 2; + esds[1] += privSize - 2; + esds[6] += privSize - 2; + + // Set ObjectTypeIndication. + esds[7] = isAudio ? 0x40 // Audio ISO/IEC 14496-3 + : 0x20; // Visual ISO/IEC 14496-2 meta->setData(kKeyESDS, 0, esds, esdsSize); @@ -707,9 +761,21 @@ void MatroskaExtractor::addTracks() { if (!strcmp("V_MPEG4/ISO/AVC", codecID)) { meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC); meta->setData(kKeyAVCC, 0, codecPrivate, codecPrivateSize); + } else if (!strcmp("V_MPEG4/ISO/ASP", codecID)) { + if (codecPrivateSize > 0) { + meta->setCString( + kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG4); + addESDSFromCodecPrivate( + meta, false, codecPrivate, codecPrivateSize); + } else { + ALOGW("%s is detected, but does not have configuration.", + codecID); + continue; + } } else if (!strcmp("V_VP8", codecID)) { meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_VPX); } else { + ALOGW("%s is not supported.", codecID); continue; } @@ -727,13 +793,16 @@ void MatroskaExtractor::addTracks() { meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_AAC); CHECK(codecPrivateSize >= 2); - addESDSFromAudioSpecificInfo( - meta, codecPrivate, codecPrivateSize); + 
addESDSFromCodecPrivate( + meta, true, codecPrivate, codecPrivateSize); } else if (!strcmp("A_VORBIS", codecID)) { meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_VORBIS); addVorbisCodecInfo(meta, codecPrivate, codecPrivateSize); + } else if (!strcmp("A_MPEG/L3", codecID)) { + meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MPEG); } else { + ALOGW("%s is not supported.", codecID); continue; } diff --git a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp b/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp index 03033f5..e1589b4 100644 --- a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp +++ b/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp @@ -22,8 +22,8 @@ #include "include/LiveSession.h" #include "include/NuCachedSource2.h" +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/DataSource.h> -#include <media/stagefright/MediaDebug.h> #include <media/stagefright/MediaDefs.h> #include <media/stagefright/MediaErrors.h> #include <media/stagefright/MediaSource.h> diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp index 694b12d..f11fcd2 100644 --- a/media/libstagefright/omx/OMX.cpp +++ b/media/libstagefright/omx/OMX.cpp @@ -25,7 +25,7 @@ #include "../include/OMXNodeInstance.h" #include <binder/IMemory.h> -#include <media/stagefright/MediaDebug.h> +#include <media/stagefright/foundation/ADebug.h> #include <utils/threads.h> #include "OMXMaster.h" @@ -102,7 +102,7 @@ OMX::CallbackDispatcher::~CallbackDispatcher() { if (status != WOULD_BLOCK) { // Other than join to self, the only other error return codes are // whatever readyToRun() returns, and we don't override that - CHECK_EQ(status, NO_ERROR); + CHECK_EQ(status, (status_t)NO_ERROR); } } @@ -185,7 +185,7 @@ void OMX::binderDied(const wp<IBinder> &the_late_who) { instance->onObserverDied(mMaster); } -bool OMX::livesLocally(pid_t pid) { +bool OMX::livesLocally(node_id node, pid_t pid) { return pid == getpid(); } diff --git 
a/media/libstagefright/omx/OMXComponentBase.cpp b/media/libstagefright/omx/OMXComponentBase.cpp index 35227a0..7d11dce 100644 --- a/media/libstagefright/omx/OMXComponentBase.cpp +++ b/media/libstagefright/omx/OMXComponentBase.cpp @@ -18,7 +18,7 @@ #include <stdlib.h> -#include <media/stagefright/MediaDebug.h> +#include <media/stagefright/foundation/ADebug.h> namespace android { @@ -33,7 +33,7 @@ OMXComponentBase::OMXComponentBase( OMXComponentBase::~OMXComponentBase() {} void OMXComponentBase::setComponentHandle(OMX_COMPONENTTYPE *handle) { - CHECK_EQ(mComponentHandle, NULL); + CHECK(mComponentHandle == NULL); mComponentHandle = handle; } diff --git a/media/libstagefright/omx/OMXMaster.cpp b/media/libstagefright/omx/OMXMaster.cpp index d698939..6b6d0ab 100644 --- a/media/libstagefright/omx/OMXMaster.cpp +++ b/media/libstagefright/omx/OMXMaster.cpp @@ -24,7 +24,7 @@ #include <dlfcn.h> -#include <media/stagefright/MediaDebug.h> +#include <media/stagefright/foundation/ADebug.h> namespace android { diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp index 8938e33..099c4f5 100644 --- a/media/libstagefright/omx/OMXNodeInstance.cpp +++ b/media/libstagefright/omx/OMXNodeInstance.cpp @@ -24,8 +24,8 @@ #include <OMX_Component.h> #include <binder/IMemory.h> +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/HardwareAPI.h> -#include <media/stagefright/MediaDebug.h> #include <media/stagefright/MediaErrors.h> namespace android { @@ -91,11 +91,11 @@ OMXNodeInstance::OMXNodeInstance( } OMXNodeInstance::~OMXNodeInstance() { - CHECK_EQ(mHandle, NULL); + CHECK(mHandle == NULL); } void OMXNodeInstance::setHandle(OMX::node_id node_id, OMX_HANDLETYPE handle) { - CHECK_EQ(mHandle, NULL); + CHECK(mHandle == NULL); mNodeID = node_id; mHandle = handle; } diff --git a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp index 0914f32..c79e01f 100644 --- 
a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp +++ b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp @@ -333,8 +333,9 @@ OMX_ERRORTYPE SimpleSoftOMXComponent::getState(OMX_STATETYPE *state) { void SimpleSoftOMXComponent::onMessageReceived(const sp<AMessage> &msg) { Mutex::Autolock autoLock(mLock); - - switch (msg->what()) { + uint32_t msgType = msg->what(); + ALOGV("msgType = %d", msgType); + switch (msgType) { case kWhatSendCommand: { int32_t cmd, param; @@ -354,27 +355,27 @@ void SimpleSoftOMXComponent::onMessageReceived(const sp<AMessage> &msg) { CHECK(mState == OMX_StateExecuting && mTargetState == mState); bool found = false; - for (size_t i = 0; i < mPorts.size(); ++i) { - PortInfo *port = &mPorts.editItemAt(i); + size_t portIndex = (kWhatEmptyThisBuffer == msgType)? + header->nInputPortIndex: header->nOutputPortIndex; + PortInfo *port = &mPorts.editItemAt(portIndex); - for (size_t j = 0; j < port->mBuffers.size(); ++j) { - BufferInfo *buffer = &port->mBuffers.editItemAt(j); + for (size_t j = 0; j < port->mBuffers.size(); ++j) { + BufferInfo *buffer = &port->mBuffers.editItemAt(j); - if (buffer->mHeader == header) { - CHECK(!buffer->mOwnedByUs); + if (buffer->mHeader == header) { + CHECK(!buffer->mOwnedByUs); - buffer->mOwnedByUs = true; + buffer->mOwnedByUs = true; - CHECK((msg->what() == kWhatEmptyThisBuffer - && port->mDef.eDir == OMX_DirInput) - || (port->mDef.eDir == OMX_DirOutput)); + CHECK((msgType == kWhatEmptyThisBuffer + && port->mDef.eDir == OMX_DirInput) + || (port->mDef.eDir == OMX_DirOutput)); - port->mQueue.push_back(buffer); - onQueueFilled(i); + port->mQueue.push_back(buffer); + onQueueFilled(portIndex); - found = true; - break; - } + found = true; + break; } } diff --git a/media/libstagefright/omx/SoftOMXPlugin.cpp b/media/libstagefright/omx/SoftOMXPlugin.cpp index da3ae42..99ffe7d 100644 --- a/media/libstagefright/omx/SoftOMXPlugin.cpp +++ b/media/libstagefright/omx/SoftOMXPlugin.cpp @@ -35,8 +35,11 @@ static const struct { } 
kComponents[] = { { "OMX.google.aac.decoder", "aacdec", "audio_decoder.aac" }, + { "OMX.google.aac.encoder", "aacenc", "audio_encoder.aac" }, { "OMX.google.amrnb.decoder", "amrdec", "audio_decoder.amrnb" }, + { "OMX.google.amrnb.encoder", "amrnbenc", "audio_encoder.amrnb" }, { "OMX.google.amrwb.decoder", "amrdec", "audio_decoder.amrwb" }, + { "OMX.google.amrwb.encoder", "amrwbenc", "audio_encoder.amrwb" }, { "OMX.google.h264.decoder", "h264dec", "video_decoder.avc" }, { "OMX.google.g711.alaw.decoder", "g711dec", "audio_decoder.g711alaw" }, { "OMX.google.g711.mlaw.decoder", "g711dec", "audio_decoder.g711mlaw" }, diff --git a/media/libstagefright/omx/tests/Android.mk b/media/libstagefright/omx/tests/Android.mk index bf69428..0c0a70c 100644 --- a/media/libstagefright/omx/tests/Android.mk +++ b/media/libstagefright/omx/tests/Android.mk @@ -5,13 +5,15 @@ LOCAL_SRC_FILES = \ OMXHarness.cpp \ LOCAL_SHARED_LIBRARIES := \ - libstagefright libbinder libmedia libutils + libstagefright libbinder libmedia libutils libstagefright_foundation -LOCAL_C_INCLUDES:= \ +LOCAL_C_INCLUDES := \ $(JNI_H_INCLUDE) \ frameworks/base/media/libstagefright \ $(TOP)/frameworks/base/include/media/stagefright/openmax -LOCAL_MODULE:= omx_tests +LOCAL_MODULE := omx_tests + +LOCAL_MODULE_TAGS := tests include $(BUILD_EXECUTABLE) diff --git a/media/libstagefright/omx/tests/OMXHarness.cpp b/media/libstagefright/omx/tests/OMXHarness.cpp index 8faf544..fab1771 100644 --- a/media/libstagefright/omx/tests/OMXHarness.cpp +++ b/media/libstagefright/omx/tests/OMXHarness.cpp @@ -26,9 +26,9 @@ #include <binder/IServiceManager.h> #include <binder/MemoryDealer.h> #include <media/IMediaPlayerService.h> +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/DataSource.h> #include <media/stagefright/MediaBuffer.h> -#include <media/stagefright/MediaDebug.h> #include <media/stagefright/MediaDefs.h> #include <media/stagefright/MediaErrors.h> #include <media/stagefright/MediaExtractor.h> @@ -155,7 
+155,7 @@ status_t Harness::dequeueMessageForNodeIgnoringBuffers( if (err == TIMED_OUT) { return err; } - CHECK_EQ(err, OK); + CHECK_EQ(err, (status_t)OK); } } @@ -317,7 +317,7 @@ status_t Harness::testStateTransitions( EXPECT_SUCCESS(err, "allocatePortBuffers(input)"); err = dequeueMessageForNode(node, &msg, DEFAULT_TIMEOUT); - CHECK_EQ(err, TIMED_OUT); + CHECK_EQ(err, (status_t)TIMED_OUT); Vector<Buffer> outputBuffers; err = allocatePortBuffers(dealer, node, 1, &outputBuffers); @@ -412,7 +412,7 @@ status_t Harness::testStateTransitions( // Make sure node doesn't just transition to loaded before we are done // freeing all input and output buffers. err = dequeueMessageForNode(node, &msg, DEFAULT_TIMEOUT); - CHECK_EQ(err, TIMED_OUT); + CHECK_EQ(err, (status_t)TIMED_OUT); for (size_t i = 0; i < inputBuffers.size(); ++i) { err = mOMX->freeBuffer(node, 0, inputBuffers[i].mID); @@ -420,7 +420,7 @@ status_t Harness::testStateTransitions( } err = dequeueMessageForNode(node, &msg, DEFAULT_TIMEOUT); - CHECK_EQ(err, TIMED_OUT); + CHECK_EQ(err, (status_t)TIMED_OUT); for (size_t i = 0; i < outputBuffers.size(); ++i) { err = mOMX->freeBuffer(node, 1, outputBuffers[i].mID); @@ -584,7 +584,7 @@ status_t Harness::testSeek( return UNKNOWN_ERROR; } - CHECK_EQ(seekSource->start(), OK); + CHECK_EQ(seekSource->start(), (status_t)OK); sp<MediaSource> codec = OMXCodec::Create( mOMX, source->getFormat(), false /* createEncoder */, @@ -592,7 +592,7 @@ status_t Harness::testSeek( CHECK(codec != NULL); - CHECK_EQ(codec->start(), OK); + CHECK_EQ(codec->start(), (status_t)OK); int64_t durationUs; CHECK(source->getFormat()->findInt64(kKeyDuration, &durationUs)); @@ -638,7 +638,7 @@ status_t Harness::testSeek( requestedSeekTimeUs, MediaSource::ReadOptions::SEEK_NEXT_SYNC); if (seekSource->read(&buffer, &options) != OK) { - CHECK_EQ(buffer, NULL); + CHECK(buffer == NULL); actualSeekTimeUs = -1; } else { CHECK(buffer != NULL); @@ -659,7 +659,7 @@ status_t Harness::testSeek( err = 
codec->read(&buffer, &options); options.clearSeekTo(); if (err == INFO_FORMAT_CHANGED) { - CHECK_EQ(buffer, NULL); + CHECK(buffer == NULL); continue; } if (err == OK) { @@ -670,7 +670,7 @@ status_t Harness::testSeek( continue; } } else { - CHECK_EQ(buffer, NULL); + CHECK(buffer == NULL); } break; @@ -679,7 +679,7 @@ status_t Harness::testSeek( if (requestedSeekTimeUs < 0) { // Linear read. if (err != OK) { - CHECK_EQ(buffer, NULL); + CHECK(buffer == NULL); } else { CHECK(buffer != NULL); buffer->release(); @@ -694,8 +694,8 @@ status_t Harness::testSeek( "We attempted to seek beyond EOS and expected " "ERROR_END_OF_STREAM to be returned, but instead " "we found some other error."); - CHECK_EQ(err, ERROR_END_OF_STREAM); - CHECK_EQ(buffer, NULL); + CHECK_EQ(err, (status_t)ERROR_END_OF_STREAM); + CHECK(buffer == NULL); } else { EXPECT(err == OK, "Expected a valid buffer to be returned from " @@ -715,7 +715,7 @@ status_t Harness::testSeek( buffer->release(); buffer = NULL; - CHECK_EQ(codec->stop(), OK); + CHECK_EQ(codec->stop(), (status_t)OK); return UNKNOWN_ERROR; } @@ -725,7 +725,7 @@ status_t Harness::testSeek( } } - CHECK_EQ(codec->stop(), OK); + CHECK_EQ(codec->stop(), (status_t)OK); return OK; } @@ -841,7 +841,7 @@ int main(int argc, char **argv) { srand(seed); sp<Harness> h = new Harness; - CHECK_EQ(h->initCheck(), OK); + CHECK_EQ(h->initCheck(), (status_t)OK); if (argc == 0) { h->testAll(); diff --git a/media/libstagefright/rtsp/AAMRAssembler.cpp b/media/libstagefright/rtsp/AAMRAssembler.cpp index 9d72b1f..fb8abc5 100644 --- a/media/libstagefright/rtsp/AAMRAssembler.cpp +++ b/media/libstagefright/rtsp/AAMRAssembler.cpp @@ -211,7 +211,7 @@ ARTPAssembler::AssemblyStatus AAMRAssembler::addPacket( } sp<AMessage> msg = mNotifyMsg->dup(); - msg->setObject("access-unit", accessUnit); + msg->setBuffer("access-unit", accessUnit); msg->post(); queue->erase(queue->begin()); diff --git a/media/libstagefright/rtsp/AAVCAssembler.cpp 
b/media/libstagefright/rtsp/AAVCAssembler.cpp index ed8b1df..7ea132e 100644 --- a/media/libstagefright/rtsp/AAVCAssembler.cpp +++ b/media/libstagefright/rtsp/AAVCAssembler.cpp @@ -345,7 +345,7 @@ void AAVCAssembler::submitAccessUnit() { mAccessUnitDamaged = false; sp<AMessage> msg = mNotifyMsg->dup(); - msg->setObject("access-unit", accessUnit); + msg->setBuffer("access-unit", accessUnit); msg->post(); } diff --git a/media/libstagefright/rtsp/AH263Assembler.cpp b/media/libstagefright/rtsp/AH263Assembler.cpp index 498295c..ded70fa 100644 --- a/media/libstagefright/rtsp/AH263Assembler.cpp +++ b/media/libstagefright/rtsp/AH263Assembler.cpp @@ -166,7 +166,7 @@ void AH263Assembler::submitAccessUnit() { mAccessUnitDamaged = false; sp<AMessage> msg = mNotifyMsg->dup(); - msg->setObject("access-unit", accessUnit); + msg->setBuffer("access-unit", accessUnit); msg->post(); } diff --git a/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp b/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp index b0c7007..24c2f30 100644 --- a/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp +++ b/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp @@ -571,7 +571,7 @@ void AMPEG4AudioAssembler::submitAccessUnit() { mAccessUnitDamaged = false; sp<AMessage> msg = mNotifyMsg->dup(); - msg->setObject("access-unit", accessUnit); + msg->setBuffer("access-unit", accessUnit); msg->post(); } diff --git a/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp b/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp index 2f2e2c2..687d72b 100644 --- a/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp +++ b/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp @@ -368,7 +368,7 @@ void AMPEG4ElementaryAssembler::submitAccessUnit() { mAccessUnitDamaged = false; sp<AMessage> msg = mNotifyMsg->dup(); - msg->setObject("access-unit", accessUnit); + msg->setBuffer("access-unit", accessUnit); msg->post(); } diff --git a/media/libstagefright/rtsp/ARTPConnection.cpp 
b/media/libstagefright/rtsp/ARTPConnection.cpp index 8c9dd8d..44988a3 100644 --- a/media/libstagefright/rtsp/ARTPConnection.cpp +++ b/media/libstagefright/rtsp/ARTPConnection.cpp @@ -639,7 +639,7 @@ sp<ARTPSource> ARTPConnection::findSource(StreamInfo *info, uint32_t srcId) { void ARTPConnection::injectPacket(int index, const sp<ABuffer> &buffer) { sp<AMessage> msg = new AMessage(kWhatInjectPacket, id()); msg->setInt32("index", index); - msg->setObject("buffer", buffer); + msg->setBuffer("buffer", buffer); msg->post(); } @@ -647,10 +647,8 @@ void ARTPConnection::onInjectPacket(const sp<AMessage> &msg) { int32_t index; CHECK(msg->findInt32("index", &index)); - sp<RefBase> obj; - CHECK(msg->findObject("buffer", &obj)); - - sp<ABuffer> buffer = static_cast<ABuffer *>(obj.get()); + sp<ABuffer> buffer; + CHECK(msg->findBuffer("buffer", &buffer)); List<StreamInfo>::iterator it = mStreams.begin(); while (it != mStreams.end() diff --git a/media/libstagefright/rtsp/ARTPSession.cpp b/media/libstagefright/rtsp/ARTPSession.cpp index 7a05b88..ba4e33c 100644 --- a/media/libstagefright/rtsp/ARTPSession.cpp +++ b/media/libstagefright/rtsp/ARTPSession.cpp @@ -145,10 +145,8 @@ void ARTPSession::onMessageReceived(const sp<AMessage> &msg) { break; } - sp<RefBase> obj; - CHECK(msg->findObject("access-unit", &obj)); - - sp<ABuffer> accessUnit = static_cast<ABuffer *>(obj.get()); + sp<ABuffer> accessUnit; + CHECK(msg->findBuffer("access-unit", &accessUnit)); uint64_t ntpTime; CHECK(accessUnit->meta()->findInt64( diff --git a/media/libstagefright/rtsp/ARTSPConnection.cpp b/media/libstagefright/rtsp/ARTSPConnection.cpp index 80a010e..539a888 100644 --- a/media/libstagefright/rtsp/ARTSPConnection.cpp +++ b/media/libstagefright/rtsp/ARTSPConnection.cpp @@ -612,7 +612,7 @@ bool ARTSPConnection::receiveRTSPReponse() { if (mObserveBinaryMessage != NULL) { sp<AMessage> notify = mObserveBinaryMessage->dup(); - notify->setObject("buffer", buffer); + notify->setBuffer("buffer", buffer); 
notify->post(); } else { ALOGW("received binary data, but no one cares."); diff --git a/media/libstagefright/rtsp/ARawAudioAssembler.cpp b/media/libstagefright/rtsp/ARawAudioAssembler.cpp index 98bee82..0da5dd2 100644 --- a/media/libstagefright/rtsp/ARawAudioAssembler.cpp +++ b/media/libstagefright/rtsp/ARawAudioAssembler.cpp @@ -94,7 +94,7 @@ ARTPAssembler::AssemblyStatus ARawAudioAssembler::addPacket( } sp<AMessage> msg = mNotifyMsg->dup(); - msg->setObject("access-unit", buffer); + msg->setBuffer("access-unit", buffer); msg->post(); queue->erase(queue->begin()); diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h index 2391c5c..deee30f 100644 --- a/media/libstagefright/rtsp/MyHandler.h +++ b/media/libstagefright/rtsp/MyHandler.h @@ -122,6 +122,7 @@ struct MyHandler : public AHandler { mSetupTracksSuccessful(false), mSeekPending(false), mFirstAccessUnit(true), + mAllTracksHaveTime(false), mNTPAnchorUs(-1), mMediaAnchorUs(-1), mLastMediaTimeUs(0), @@ -723,6 +724,7 @@ struct MyHandler : public AHandler { mSetupTracksSuccessful = false; mSeekPending = false; mFirstAccessUnit = true; + mAllTracksHaveTime = false; mNTPAnchorUs = -1; mMediaAnchorUs = -1; mNumAccessUnitsReceived = 0; @@ -855,10 +857,8 @@ struct MyHandler : public AHandler { return; } - sp<RefBase> obj; - CHECK(msg->findObject("access-unit", &obj)); - - sp<ABuffer> accessUnit = static_cast<ABuffer *>(obj.get()); + sp<ABuffer> accessUnit; + CHECK(msg->findBuffer("access-unit", &accessUnit)); uint32_t seqNum = (uint32_t)accessUnit->int32Data(); @@ -930,6 +930,7 @@ struct MyHandler : public AHandler { info->mNTPAnchorUs = -1; } + mAllTracksHaveTime = false; mNTPAnchorUs = -1; int64_t timeUs; @@ -1002,9 +1003,8 @@ struct MyHandler : public AHandler { case 'biny': { - sp<RefBase> obj; - CHECK(msg->findObject("buffer", &obj)); - sp<ABuffer> buffer = static_cast<ABuffer *>(obj.get()); + sp<ABuffer> buffer; + CHECK(msg->findBuffer("buffer", &buffer)); int32_t index; 
CHECK(buffer->meta()->findInt32("index", &index)); @@ -1037,6 +1037,14 @@ struct MyHandler : public AHandler { ALOGW("Never received any data, disconnecting."); (new AMessage('abor', id()))->post(); } + } else { + if (!mAllTracksHaveTime) { + ALOGW("We received some RTCP packets, but time " + "could not be established on all tracks, now " + "using fake timestamps"); + + fakeTimestamps(); + } } break; } @@ -1211,6 +1219,7 @@ private: bool mSeekPending; bool mFirstAccessUnit; + bool mAllTracksHaveTime; int64_t mNTPAnchorUs; int64_t mMediaAnchorUs; int64_t mLastMediaTimeUs; @@ -1357,6 +1366,7 @@ private: } void fakeTimestamps() { + mNTPAnchorUs = -1ll; for (size_t i = 0; i < mTracks.size(); ++i) { onTimeUpdate(i, 0, 0ll); } @@ -1377,6 +1387,21 @@ private: mNTPAnchorUs = ntpTimeUs; mMediaAnchorUs = mLastMediaTimeUs; } + + if (!mAllTracksHaveTime) { + bool allTracksHaveTime = true; + for (size_t i = 0; i < mTracks.size(); ++i) { + TrackInfo *track = &mTracks.editItemAt(i); + if (track->mNTPAnchorUs < 0) { + allTracksHaveTime = false; + break; + } + } + if (allTracksHaveTime) { + mAllTracksHaveTime = true; + ALOGI("Time now established for all tracks."); + } + } } void onAccessUnitComplete( @@ -1403,7 +1428,7 @@ private: TrackInfo *track = &mTracks.editItemAt(trackIndex); - if (mNTPAnchorUs < 0 || mMediaAnchorUs < 0 || track->mNTPAnchorUs < 0) { + if (!mAllTracksHaveTime) { ALOGV("storing accessUnit, no time established yet"); track->mPackets.push_back(accessUnit); return; @@ -1460,7 +1485,7 @@ private: sp<AMessage> msg = mNotify->dup(); msg->setInt32("what", kWhatAccessUnit); msg->setSize("trackIndex", trackIndex); - msg->setObject("accessUnit", accessUnit); + msg->setBuffer("accessUnit", accessUnit); msg->post(); } diff --git a/media/libstagefright/tests/SurfaceMediaSource_test.cpp b/media/libstagefright/tests/SurfaceMediaSource_test.cpp index 76b507f..3dcd9fc 100644 --- a/media/libstagefright/tests/SurfaceMediaSource_test.cpp +++ 
b/media/libstagefright/tests/SurfaceMediaSource_test.cpp @@ -26,16 +26,16 @@ #include <media/stagefright/SurfaceMediaSource.h> #include <media/mediarecorder.h> -#include <gui/SurfaceTextureClient.h> #include <ui/GraphicBuffer.h> -#include <surfaceflinger/ISurfaceComposer.h> -#include <surfaceflinger/Surface.h> -#include <surfaceflinger/SurfaceComposerClient.h> +#include <gui/SurfaceTextureClient.h> +#include <gui/ISurfaceComposer.h> +#include <gui/Surface.h> +#include <gui/SurfaceComposerClient.h> #include <binder/ProcessState.h> #include <ui/FramebufferNativeWindow.h> -#include <media/stagefright/MediaDebug.h> +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/MediaBufferGroup.h> #include <media/stagefright/MediaDefs.h> #include <media/stagefright/MetaData.h> @@ -475,7 +475,7 @@ sp<MediaRecorder> SurfaceMediaSourceGLTest::setUpMediaRecorder(int fd, int video mr->setVideoFrameRate(fps); mr->prepare(); ALOGV("Starting MediaRecorder..."); - CHECK_EQ(OK, mr->start()); + CHECK_EQ((status_t)OK, mr->start()); return mr; } @@ -757,7 +757,7 @@ TEST_F(SurfaceMediaSourceTest, DISABLED_EncodingFromCpuYV12BufferNpotWriteMediaS ASSERT_EQ(NO_ERROR, native_window_api_disconnect(mANW.get(), NATIVE_WINDOW_API_CPU)); ALOGV("Stopping MediaRecorder..."); - CHECK_EQ(OK, mr->stop()); + CHECK_EQ((status_t)OK, mr->stop()); mr.clear(); close(fd); } @@ -886,7 +886,7 @@ TEST_F(SurfaceMediaSourceGLTest, EncodingFromGLRgbaSameImageEachBufNpotWrite) { mEglSurface = EGL_NO_SURFACE; ALOGV("Stopping MediaRecorder..."); - CHECK_EQ(OK, mr->stop()); + CHECK_EQ((status_t)OK, mr->stop()); mr.clear(); close(fd); } @@ -929,7 +929,7 @@ TEST_F(SurfaceMediaSourceGLTest, EncodingFromGLRgbaDiffImageEachBufNpotWrite) { mEglSurface = EGL_NO_SURFACE; ALOGV("Stopping MediaRecorder..."); - CHECK_EQ(OK, mr->stop()); + CHECK_EQ((status_t)OK, mr->stop()); mr.clear(); close(fd); } diff --git a/media/libstagefright/timedtext/Android.mk b/media/libstagefright/timedtext/Android.mk index 
59d0e15..d2d5f7b 100644 --- a/media/libstagefright/timedtext/Android.mk +++ b/media/libstagefright/timedtext/Android.mk @@ -3,14 +3,17 @@ include $(CLEAR_VARS) LOCAL_SRC_FILES:= \ TextDescriptions.cpp \ - TimedTextParser.cpp \ + TimedTextDriver.cpp \ + TimedText3GPPSource.cpp \ + TimedTextSource.cpp \ + TimedTextSRTSource.cpp \ TimedTextPlayer.cpp LOCAL_CFLAGS += -Wno-multichar LOCAL_C_INCLUDES:= \ $(JNI_H_INCLUDE) \ - $(TOP)/frameworks/base/media/libstagefright \ - $(TOP)/frameworks/base/include/media/stagefright/openmax + $(TOP)/frameworks/base/include/media/stagefright/timedtext \ + $(TOP)/frameworks/base/media/libstagefright LOCAL_MODULE:= libstagefright_timedtext diff --git a/media/libstagefright/timedtext/TimedText3GPPSource.cpp b/media/libstagefright/timedtext/TimedText3GPPSource.cpp new file mode 100644 index 0000000..4a3bfd3 --- /dev/null +++ b/media/libstagefright/timedtext/TimedText3GPPSource.cpp @@ -0,0 +1,113 @@ + /* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "TimedText3GPPSource" +#include <utils/Log.h> + +#include <binder/Parcel.h> +#include <media/stagefright/foundation/ADebug.h> // CHECK_XX macro +#include <media/stagefright/MediaBuffer.h> +#include <media/stagefright/MediaDefs.h> // for MEDIA_MIMETYPE_xxx +#include <media/stagefright/MediaErrors.h> +#include <media/stagefright/MediaSource.h> +#include <media/stagefright/MetaData.h> + +#include "TimedText3GPPSource.h" +#include "TextDescriptions.h" + +namespace android { + +TimedText3GPPSource::TimedText3GPPSource(const sp<MediaSource>& mediaSource) + : mSource(mediaSource) { +} + +TimedText3GPPSource::~TimedText3GPPSource() { +} + +status_t TimedText3GPPSource::read( + int64_t *timeUs, Parcel *parcel, const MediaSource::ReadOptions *options) { + MediaBuffer *textBuffer = NULL; + status_t err = mSource->read(&textBuffer, options); + if (err != OK) { + return err; + } + CHECK(textBuffer != NULL); + textBuffer->meta_data()->findInt64(kKeyTime, timeUs); + // TODO: this is legacy code. when 'timeUs' can be <= 0? + if (*timeUs > 0) { + extractAndAppendLocalDescriptions(*timeUs, textBuffer, parcel); + } + textBuffer->release(); + return OK; +} + +// Each text sample consists of a string of text, optionally with sample +// modifier description. The modifier description could specify a new +// text style for the string of text. These descriptions are present only +// if they are needed. This method is used to extract the modifier +// description and append it at the end of the text. 
+status_t TimedText3GPPSource::extractAndAppendLocalDescriptions( + int64_t timeUs, const MediaBuffer *textBuffer, Parcel *parcel) { + const void *data; + size_t size = 0; + int32_t flag = TextDescriptions::LOCAL_DESCRIPTIONS; + + const char *mime; + CHECK(mSource->getFormat()->findCString(kKeyMIMEType, &mime)); + CHECK(strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP) == 0); + + data = textBuffer->data(); + size = textBuffer->size(); + + if (size > 0) { + parcel->freeData(); + flag |= TextDescriptions::IN_BAND_TEXT_3GPP; + return TextDescriptions::getParcelOfDescriptions( + (const uint8_t *)data, size, flag, timeUs / 1000, parcel); + } + return OK; +} + +// To extract and send the global text descriptions for all the text samples +// in the text track or text file. +// TODO: send error message to application via notifyListener()...? +status_t TimedText3GPPSource::extractGlobalDescriptions(Parcel *parcel) { + const void *data; + size_t size = 0; + int32_t flag = TextDescriptions::GLOBAL_DESCRIPTIONS; + + const char *mime; + CHECK(mSource->getFormat()->findCString(kKeyMIMEType, &mime)); + CHECK(strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP) == 0); + + uint32_t type; + // get the 'tx3g' box content. 
This box contains the text descriptions + // used to render the text track + if (!mSource->getFormat()->findData( + kKeyTextFormatData, &type, &data, &size)) { + return ERROR_MALFORMED; + } + + if (size > 0) { + flag |= TextDescriptions::IN_BAND_TEXT_3GPP; + return TextDescriptions::getParcelOfDescriptions( + (const uint8_t *)data, size, flag, 0, parcel); + } + return OK; +} + +} // namespace android diff --git a/media/libstagefright/timedtext/TimedText3GPPSource.h b/media/libstagefright/timedtext/TimedText3GPPSource.h new file mode 100644 index 0000000..cb7e47c --- /dev/null +++ b/media/libstagefright/timedtext/TimedText3GPPSource.h @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef TIMED_TEXT_3GPP_SOURCE_H_ +#define TIMED_TEXT_3GPP_SOURCE_H_ + +#include <media/stagefright/MediaErrors.h> +#include <media/stagefright/MediaSource.h> + +#include "TimedTextSource.h" + +namespace android { + +class MediaBuffer; +class Parcel; + +class TimedText3GPPSource : public TimedTextSource { + public: + TimedText3GPPSource(const sp<MediaSource>& mediaSource); + virtual status_t start() { return mSource->start(); } + virtual status_t stop() { return mSource->stop(); } + virtual status_t read( + int64_t *timeUs, + Parcel *parcel, + const MediaSource::ReadOptions *options = NULL); + virtual status_t extractGlobalDescriptions(Parcel *parcel); + + protected: + virtual ~TimedText3GPPSource(); + + private: + sp<MediaSource> mSource; + + status_t extractAndAppendLocalDescriptions( + int64_t timeUs, const MediaBuffer *textBuffer, Parcel *parcel); + + DISALLOW_EVIL_CONSTRUCTORS(TimedText3GPPSource); +}; + +} // namespace android + +#endif // TIMED_TEXT_3GPP_SOURCE_H_ diff --git a/media/libstagefright/timedtext/TimedTextDriver.cpp b/media/libstagefright/timedtext/TimedTextDriver.cpp new file mode 100644 index 0000000..c70870e --- /dev/null +++ b/media/libstagefright/timedtext/TimedTextDriver.cpp @@ -0,0 +1,222 @@ + /* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "TimedTextDriver" +#include <utils/Log.h> + +#include <binder/IPCThreadState.h> + +#include <media/MediaPlayerInterface.h> +#include <media/stagefright/MediaErrors.h> +#include <media/stagefright/MediaSource.h> +#include <media/stagefright/DataSource.h> +#include <media/stagefright/Utils.h> +#include <media/stagefright/foundation/ADebug.h> +#include <media/stagefright/foundation/ALooper.h> +#include <media/stagefright/timedtext/TimedTextDriver.h> + +#include "TextDescriptions.h" +#include "TimedTextPlayer.h" +#include "TimedTextSource.h" + +namespace android { + +TimedTextDriver::TimedTextDriver( + const wp<MediaPlayerBase> &listener) + : mLooper(new ALooper), + mListener(listener), + mState(UNINITIALIZED) { + mLooper->setName("TimedTextDriver"); + mLooper->start(); + mPlayer = new TimedTextPlayer(listener); + mLooper->registerHandler(mPlayer); +} + +TimedTextDriver::~TimedTextDriver() { + mTextInBandVector.clear(); + mTextOutOfBandVector.clear(); + mLooper->stop(); +} + +status_t TimedTextDriver::setTimedTextTrackIndex_l(int32_t index) { + if (index >= + (int)(mTextInBandVector.size() + mTextOutOfBandVector.size())) { + return BAD_VALUE; + } + + sp<TimedTextSource> source; + if (index < mTextInBandVector.size()) { + source = mTextInBandVector.itemAt(index); + } else { + source = mTextOutOfBandVector.itemAt(index - mTextInBandVector.size()); + } + mPlayer->setDataSource(source); + return OK; +} + +status_t TimedTextDriver::start() { + Mutex::Autolock autoLock(mLock); + switch (mState) { + case UNINITIALIZED: + return INVALID_OPERATION; + case STOPPED: + mPlayer->start(); + break; + case PLAYING: + return OK; + case PAUSED: + mPlayer->resume(); + break; + default: + TRESPASS(); + } + mState = PLAYING; + return OK; +} + +status_t TimedTextDriver::stop() { + return pause(); +} + +// TODO: Test if pause() works properly. 
+// Scenario 1: start - pause - resume +// Scenario 2: start - seek +// Scenario 3: start - pause - seek - resume +status_t TimedTextDriver::pause() { + Mutex::Autolock autoLock(mLock); + switch (mState) { + case UNINITIALIZED: + return INVALID_OPERATION; + case STOPPED: + return OK; + case PLAYING: + mPlayer->pause(); + break; + case PAUSED: + return OK; + default: + TRESPASS(); + } + mState = PAUSED; + return OK; +} + +status_t TimedTextDriver::resume() { + return start(); +} + +status_t TimedTextDriver::seekToAsync(int64_t timeUs) { + mPlayer->seekToAsync(timeUs); + return OK; +} + +status_t TimedTextDriver::setTimedTextTrackIndex(int32_t index) { + // TODO: This is current implementation for MediaPlayer::disableTimedText(). + // Find better way for readability. + if (index < 0) { + mPlayer->pause(); + return OK; + } + + status_t ret = OK; + Mutex::Autolock autoLock(mLock); + switch (mState) { + case UNINITIALIZED: + ret = INVALID_OPERATION; + break; + case PAUSED: + ret = setTimedTextTrackIndex_l(index); + break; + case PLAYING: + mPlayer->pause(); + ret = setTimedTextTrackIndex_l(index); + if (ret != OK) { + break; + } + mPlayer->start(); + break; + case STOPPED: + // TODO: The only difference between STOPPED and PAUSED is this + // part. Revise the flow from "MediaPlayer::enableTimedText()" and + // remove one of the status, PAUSED and STOPPED, if possible. 
+ ret = setTimedTextTrackIndex_l(index); + if (ret != OK) { + break; + } + mPlayer->start(); + break; + default: + TRESPASS(); + } + return ret; +} + +status_t TimedTextDriver::addInBandTextSource( + const sp<MediaSource>& mediaSource) { + sp<TimedTextSource> source = + TimedTextSource::CreateTimedTextSource(mediaSource); + if (source == NULL) { + return ERROR_UNSUPPORTED; + } + Mutex::Autolock autoLock(mLock); + mTextInBandVector.add(source); + if (mState == UNINITIALIZED) { + mState = STOPPED; + } + return OK; +} + +status_t TimedTextDriver::addOutOfBandTextSource( + const Parcel &request) { + // TODO: Define "TimedTextSource::CreateFromURI(uri)" + // and move below lines there..? + + // String values written in Parcel are UTF-16 values. + const String16 uri16 = request.readString16(); + String8 uri = String8(uri16); + + uri.toLower(); + // To support local subtitle file only for now + if (strncasecmp("file://", uri.string(), 7)) { + return ERROR_UNSUPPORTED; + } + sp<DataSource> dataSource = + DataSource::CreateFromURI(uri); + if (dataSource == NULL) { + return ERROR_UNSUPPORTED; + } + + sp<TimedTextSource> source; + if (uri.getPathExtension() == String8(".srt")) { + source = TimedTextSource::CreateTimedTextSource( + dataSource, TimedTextSource::OUT_OF_BAND_FILE_SRT); + } + + if (source == NULL) { + return ERROR_UNSUPPORTED; + } + + Mutex::Autolock autoLock(mLock); + + mTextOutOfBandVector.add(source); + if (mState == UNINITIALIZED) { + mState = STOPPED; + } + return OK; +} + +} // namespace android diff --git a/media/libstagefright/timedtext/TimedTextParser.h b/media/libstagefright/timedtext/TimedTextParser.h deleted file mode 100644 index 44774c2..0000000 --- a/media/libstagefright/timedtext/TimedTextParser.h +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright (C) 2011 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef TIMED_TEXT_PARSER_H_ - -#define TIMED_TEXT_PARSER_H_ - -#include <media/MediaPlayerInterface.h> -#include <media/stagefright/foundation/ABase.h> -#include <media/stagefright/foundation/AString.h> -#include <media/stagefright/MediaSource.h> - -namespace android { - -class DataSource; - -class TimedTextParser : public RefBase { -public: - TimedTextParser(); - virtual ~TimedTextParser(); - - enum FileType { - OUT_OF_BAND_FILE_SRT = 1, - }; - - status_t getText(AString *text, int64_t *startTimeUs, int64_t *endTimeUs, - const MediaSource::ReadOptions *options = NULL); - status_t init(const sp<DataSource> &dataSource, FileType fileType); - void reset(); - -private: - Mutex mLock; - - sp<DataSource> mDataSource; - off64_t mOffset; - - struct TextInfo { - int64_t endTimeUs; - // the offset of the text in the original file - off64_t offset; - int textLen; - }; - - int mIndex; - FileType mFileType; - - // the key indicated the start time of the text - KeyedVector<int64_t, TextInfo> mTextVector; - - status_t getNextInSrtFileFormat( - off64_t *offset, int64_t *startTimeUs, TextInfo *info); - status_t readNextLine(off64_t *offset, AString *data); - - status_t scanFile(); - - DISALLOW_EVIL_CONSTRUCTORS(TimedTextParser); -}; - -} // namespace android - -#endif // TIMED_TEXT_PARSER_H_ - diff --git a/media/libstagefright/timedtext/TimedTextPlayer.cpp b/media/libstagefright/timedtext/TimedTextPlayer.cpp index 3014b0b..bda7b46 100644 --- a/media/libstagefright/timedtext/TimedTextPlayer.cpp +++ b/media/libstagefright/timedtext/TimedTextPlayer.cpp @@ 
-1,5 +1,5 @@ /* - * Copyright (C) 2011 The Android Open Source Project + * Copyright (C) 2012 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,399 +18,164 @@ #define LOG_TAG "TimedTextPlayer" #include <utils/Log.h> -#include <binder/IPCThreadState.h> - -#include <media/stagefright/MediaDebug.h> -#include <media/stagefright/MediaDefs.h> +#include <media/stagefright/foundation/ADebug.h> +#include <media/stagefright/foundation/AMessage.h> +#include <media/stagefright/timedtext/TimedTextDriver.h> #include <media/stagefright/MediaErrors.h> -#include <media/stagefright/MediaSource.h> -#include <media/stagefright/MetaData.h> -#include <media/stagefright/MediaBuffer.h> -#include <media/stagefright/FileSource.h> -#include <media/stagefright/Utils.h> +#include <media/MediaPlayerInterface.h> -#include "include/AwesomePlayer.h" #include "TimedTextPlayer.h" -#include "TimedTextParser.h" -#include "TextDescriptions.h" - -namespace android { -struct TimedTextEvent : public TimedEventQueue::Event { - TimedTextEvent( - TimedTextPlayer *player, - void (TimedTextPlayer::*method)()) - : mPlayer(player), - mMethod(method) { - } - -protected: - virtual ~TimedTextEvent() {} - - virtual void fire(TimedEventQueue *queue, int64_t /* now_us */) { - (mPlayer->*mMethod)(); - } +#include "TimedTextSource.h" -private: - TimedTextPlayer *mPlayer; - void (TimedTextPlayer::*mMethod)(); +namespace android { - TimedTextEvent(const TimedTextEvent &); - TimedTextEvent &operator=(const TimedTextEvent &); -}; +static const int64_t kAdjustmentProcessingTimeUs = 100000ll; -TimedTextPlayer::TimedTextPlayer( - AwesomePlayer *observer, - const wp<MediaPlayerBase> &listener, - TimedEventQueue *queue) - : mSource(NULL), - mOutOfBandSource(NULL), - mSeekTimeUs(0), - mStarted(false), - mTextEventPending(false), - mQueue(queue), - mListener(listener), - mObserver(observer), - 
mTextBuffer(NULL), - mTextParser(NULL), - mTextType(kNoText) { - mTextEvent = new TimedTextEvent(this, &TimedTextPlayer::onTextEvent); +TimedTextPlayer::TimedTextPlayer(const wp<MediaPlayerBase> &listener) + : mListener(listener), + mSource(NULL), + mSendSubtitleGeneration(0) { } TimedTextPlayer::~TimedTextPlayer() { - if (mStarted) { - reset(); + if (mSource != NULL) { + mSource->stop(); + mSource.clear(); + mSource = NULL; } - - mTextTrackVector.clear(); - mTextOutOfBandVector.clear(); } -status_t TimedTextPlayer::start(uint8_t index) { - CHECK(!mStarted); - - if (index >= - mTextTrackVector.size() + mTextOutOfBandVector.size()) { - ALOGE("Incorrect text track index: %d", index); - return BAD_VALUE; - } - - status_t err; - if (index < mTextTrackVector.size()) { // start an in-band text - mSource = mTextTrackVector.itemAt(index); - - err = mSource->start(); - - if (err != OK) { - return err; - } - mTextType = kInBandText; - } else { // start an out-of-band text - OutOfBandText text = - mTextOutOfBandVector.itemAt(index - mTextTrackVector.size()); - - mOutOfBandSource = text.source; - TimedTextParser::FileType fileType = text.type; - - if (mTextParser == NULL) { - mTextParser = new TimedTextParser(); - } - - if ((err = mTextParser->init(mOutOfBandSource, fileType)) != OK) { - return err; - } - mTextType = kOutOfBandText; - } - - // send sample description format - if ((err = extractAndSendGlobalDescriptions()) != OK) { - return err; - } - - int64_t positionUs; - mObserver->getPosition(&positionUs); - seekTo(positionUs); - - postTextEvent(); - - mStarted = true; - - return OK; +void TimedTextPlayer::start() { + sp<AMessage> msg = new AMessage(kWhatSeek, id()); + msg->setInt64("seekTimeUs", -1); + msg->post(); } void TimedTextPlayer::pause() { - CHECK(mStarted); - - cancelTextEvent(); + (new AMessage(kWhatPause, id()))->post(); } void TimedTextPlayer::resume() { - CHECK(mStarted); - - postTextEvent(); -} - -void TimedTextPlayer::reset() { - CHECK(mStarted); - - // 
send an empty text to clear the screen - notifyListener(MEDIA_TIMED_TEXT); - - cancelTextEvent(); - - mSeeking = false; - mStarted = false; - - if (mTextType == kInBandText) { - if (mTextBuffer != NULL) { - mTextBuffer->release(); - mTextBuffer = NULL; - } - - if (mSource != NULL) { - mSource->stop(); - mSource.clear(); - mSource = NULL; - } - } else { - if (mTextParser != NULL) { - mTextParser.clear(); - mTextParser = NULL; - } - if (mOutOfBandSource != NULL) { - mOutOfBandSource.clear(); - mOutOfBandSource = NULL; - } - } + start(); } -status_t TimedTextPlayer::seekTo(int64_t time_us) { - Mutex::Autolock autoLock(mLock); - - mSeeking = true; - mSeekTimeUs = time_us; - - postTextEvent(); - - return OK; +void TimedTextPlayer::seekToAsync(int64_t timeUs) { + sp<AMessage> msg = new AMessage(kWhatSeek, id()); + msg->setInt64("seekTimeUs", timeUs); + msg->post(); } -status_t TimedTextPlayer::setTimedTextTrackIndex(int32_t index) { - if (index >= - (int)(mTextTrackVector.size() + mTextOutOfBandVector.size())) { - return BAD_VALUE; - } - - if (mStarted) { - reset(); - } - - if (index >= 0) { - return start(index); - } - return OK; +void TimedTextPlayer::setDataSource(sp<TimedTextSource> source) { + sp<AMessage> msg = new AMessage(kWhatSetSource, id()); + msg->setObject("source", source); + msg->post(); } -void TimedTextPlayer::onTextEvent() { - Mutex::Autolock autoLock(mLock); - - if (!mTextEventPending) { - return; - } - mTextEventPending = false; - - if (mData.dataSize() > 0) { - notifyListener(MEDIA_TIMED_TEXT, &mData); - mData.freeData(); - } - - MediaSource::ReadOptions options; - if (mSeeking) { - options.setSeekTo(mSeekTimeUs, - MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC); - mSeeking = false; - - notifyListener(MEDIA_TIMED_TEXT); //empty text to clear the screen - } - - int64_t positionUs, timeUs; - mObserver->getPosition(&positionUs); - - if (mTextType == kInBandText) { - if (mSource->read(&mTextBuffer, &options) != OK) { - return; +void 
TimedTextPlayer::onMessageReceived(const sp<AMessage> &msg) { + switch (msg->what()) { + case kWhatPause: { + mSendSubtitleGeneration++; + break; } - - mTextBuffer->meta_data()->findInt64(kKeyTime, &timeUs); - } else { - int64_t endTimeUs; - if (mTextParser->getText( - &mText, &timeUs, &endTimeUs, &options) != OK) { - return; - } - } - - if (timeUs > 0) { - extractAndAppendLocalDescriptions(timeUs); - } - - if (mTextType == kInBandText) { - if (mTextBuffer != NULL) { - mTextBuffer->release(); - mTextBuffer = NULL; + case kWhatSeek: { + int64_t seekTimeUs = 0; + msg->findInt64("seekTimeUs", &seekTimeUs); + if (seekTimeUs < 0) { + sp<MediaPlayerBase> listener = mListener.promote(); + if (listener != NULL) { + int32_t positionMs = 0; + listener->getCurrentPosition(&positionMs); + seekTimeUs = positionMs * 1000ll; + } + } + doSeekAndRead(seekTimeUs); + break; + } + case kWhatSendSubtitle: { + int32_t generation; + CHECK(msg->findInt32("generation", &generation)); + if (generation != mSendSubtitleGeneration) { + // Drop obsolete msg. 
+ break; + } + sp<RefBase> obj; + msg->findObject("subtitle", &obj); + if (obj != NULL) { + sp<ParcelEvent> parcelEvent; + parcelEvent = static_cast<ParcelEvent*>(obj.get()); + notifyListener(MEDIA_TIMED_TEXT, &(parcelEvent->parcel)); + } else { + notifyListener(MEDIA_TIMED_TEXT); + } + doRead(); + break; + } + case kWhatSetSource: { + sp<RefBase> obj; + msg->findObject("source", &obj); + if (obj == NULL) break; + if (mSource != NULL) { + mSource->stop(); + } + mSource = static_cast<TimedTextSource*>(obj.get()); + mSource->start(); + Parcel parcel; + if (mSource->extractGlobalDescriptions(&parcel) == OK && + parcel.dataSize() > 0) { + notifyListener(MEDIA_TIMED_TEXT, &parcel); + } else { + notifyListener(MEDIA_TIMED_TEXT); + } + break; } - } else { - mText.clear(); - } - - //send the text now - if (timeUs <= positionUs + 100000ll) { - postTextEvent(); - } else { - postTextEvent(timeUs - positionUs - 100000ll); } } -void TimedTextPlayer::postTextEvent(int64_t delayUs) { - if (mTextEventPending) { - return; - } - - mTextEventPending = true; - mQueue->postEventWithDelay(mTextEvent, delayUs < 0 ? 
10000 : delayUs); -} - -void TimedTextPlayer::cancelTextEvent() { - mQueue->cancelEvent(mTextEvent->eventID()); - mTextEventPending = false; +void TimedTextPlayer::doSeekAndRead(int64_t seekTimeUs) { + MediaSource::ReadOptions options; + options.setSeekTo(seekTimeUs, MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC); + doRead(&options); } -void TimedTextPlayer::addTextSource(sp<MediaSource> source) { - Mutex::Autolock autoLock(mLock); - mTextTrackVector.add(source); +void TimedTextPlayer::doRead(MediaSource::ReadOptions* options) { + int64_t timeUs = 0; + sp<ParcelEvent> parcelEvent = new ParcelEvent(); + mSource->read(&timeUs, &(parcelEvent->parcel), options); + postTextEvent(parcelEvent, timeUs); } -status_t TimedTextPlayer::setParameter(int key, const Parcel &request) { - Mutex::Autolock autoLock(mLock); - - if (key == KEY_PARAMETER_TIMED_TEXT_ADD_OUT_OF_BAND_SOURCE) { - const String16 uri16 = request.readString16(); - String8 uri = String8(uri16); - KeyedVector<String8, String8> headers; - - // To support local subtitle file only for now - if (strncasecmp("file://", uri.string(), 7)) { - return INVALID_OPERATION; - } - sp<DataSource> dataSource = - DataSource::CreateFromURI(uri, &headers); - status_t err = dataSource->initCheck(); +void TimedTextPlayer::postTextEvent(const sp<ParcelEvent>& parcel, int64_t timeUs) { + sp<MediaPlayerBase> listener = mListener.promote(); + if (listener != NULL) { + int64_t positionUs, delayUs; + int32_t positionMs = 0; + listener->getCurrentPosition(&positionMs); + positionUs = positionMs * 1000ll; - if (err != OK) { - return err; - } - - OutOfBandText text; - text.source = dataSource; - if (uri.getPathExtension() == String8(".srt")) { - text.type = TimedTextParser::OUT_OF_BAND_FILE_SRT; + if (timeUs <= positionUs + kAdjustmentProcessingTimeUs) { + delayUs = 0; } else { - return ERROR_UNSUPPORTED; + delayUs = timeUs - positionUs - kAdjustmentProcessingTimeUs; } - - mTextOutOfBandVector.add(text); - - return OK; - } - return 
INVALID_OPERATION; -} - -void TimedTextPlayer::notifyListener(int msg, const Parcel *parcel) { - if (mListener != NULL) { - sp<MediaPlayerBase> listener = mListener.promote(); - - if (listener != NULL) { - if (parcel && (parcel->dataSize() > 0)) { - listener->sendEvent(msg, 0, 0, parcel); - } else { // send an empty timed text to clear the screen - listener->sendEvent(msg); - } + sp<AMessage> msg = new AMessage(kWhatSendSubtitle, id()); + msg->setInt32("generation", mSendSubtitleGeneration); + if (parcel != NULL) { + msg->setObject("subtitle", parcel); } + msg->post(delayUs); } } -// Each text sample consists of a string of text, optionally with sample -// modifier description. The modifier description could specify a new -// text style for the string of text. These descriptions are present only -// if they are needed. This method is used to extract the modifier -// description and append it at the end of the text. -status_t TimedTextPlayer::extractAndAppendLocalDescriptions(int64_t timeUs) { - const void *data; - size_t size = 0; - int32_t flag = TextDescriptions::LOCAL_DESCRIPTIONS; - - if (mTextType == kInBandText) { - const char *mime; - CHECK(mSource->getFormat()->findCString(kKeyMIMEType, &mime)); - - if (!strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP)) { - flag |= TextDescriptions::IN_BAND_TEXT_3GPP; - data = mTextBuffer->data(); - size = mTextBuffer->size(); - } else { - // support 3GPP only for now - return ERROR_UNSUPPORTED; +void TimedTextPlayer::notifyListener(int msg, const Parcel *parcel) { + sp<MediaPlayerBase> listener = mListener.promote(); + if (listener != NULL) { + if (parcel != NULL && (parcel->dataSize() > 0)) { + listener->sendEvent(msg, 0, 0, parcel); + } else { // send an empty timed text to clear the screen + listener->sendEvent(msg); } - } else { - data = mText.c_str(); - size = mText.size(); - flag |= TextDescriptions::OUT_OF_BAND_TEXT_SRT; } - - if ((size > 0) && (flag != TextDescriptions::LOCAL_DESCRIPTIONS)) { - mData.freeData(); - return 
TextDescriptions::getParcelOfDescriptions( - (const uint8_t *)data, size, flag, timeUs / 1000, &mData); - } - - return OK; } -// To extract and send the global text descriptions for all the text samples -// in the text track or text file. -status_t TimedTextPlayer::extractAndSendGlobalDescriptions() { - const void *data; - size_t size = 0; - int32_t flag = TextDescriptions::GLOBAL_DESCRIPTIONS; - - if (mTextType == kInBandText) { - const char *mime; - CHECK(mSource->getFormat()->findCString(kKeyMIMEType, &mime)); - - // support 3GPP only for now - if (!strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP)) { - uint32_t type; - // get the 'tx3g' box content. This box contains the text descriptions - // used to render the text track - if (!mSource->getFormat()->findData( - kKeyTextFormatData, &type, &data, &size)) { - return ERROR_MALFORMED; - } - - flag |= TextDescriptions::IN_BAND_TEXT_3GPP; - } - } - - if ((size > 0) && (flag != TextDescriptions::GLOBAL_DESCRIPTIONS)) { - Parcel parcel; - if (TextDescriptions::getParcelOfDescriptions( - (const uint8_t *)data, size, flag, 0, &parcel) == OK) { - if (parcel.dataSize() > 0) { - notifyListener(MEDIA_TIMED_TEXT, &parcel); - } - } - } - - return OK; -} -} +} // namespace android diff --git a/media/libstagefright/timedtext/TimedTextPlayer.h b/media/libstagefright/timedtext/TimedTextPlayer.h index a744db5..837beeb 100644 --- a/media/libstagefright/timedtext/TimedTextPlayer.h +++ b/media/libstagefright/timedtext/TimedTextPlayer.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011 The Android Open Source Project + * Copyright (C) 2012 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -15,99 +15,61 @@ */ #ifndef TIMEDTEXT_PLAYER_H_ - #define TIMEDTEXT_PLAYER_H_ -#include <media/MediaPlayerInterface.h> +#include <binder/Parcel.h> #include <media/stagefright/foundation/ABase.h> -#include <media/stagefright/foundation/AString.h> +#include <media/stagefright/foundation/AHandler.h> +#include <media/stagefright/MediaSource.h> +#include <utils/RefBase.h> -#include "include/TimedEventQueue.h" -#include "TimedTextParser.h" +#include "TimedTextSource.h" namespace android { -class MediaSource; -class AwesomePlayer; -class MediaBuffer; +class AMessage; +class MediaPlayerBase; +class TimedTextDriver; +class TimedTextSource; -class TimedTextPlayer { +class TimedTextPlayer : public AHandler { public: - TimedTextPlayer(AwesomePlayer *observer, - const wp<MediaPlayerBase> &listener, - TimedEventQueue *queue); + TimedTextPlayer(const wp<MediaPlayerBase> &listener); virtual ~TimedTextPlayer(); - // index: the index of the text track which will - // be turned on - status_t start(uint8_t index); - + void start(); void pause(); - void resume(); + void seekToAsync(int64_t timeUs); + void setDataSource(sp<TimedTextSource> source); - status_t seekTo(int64_t time_us); - - void addTextSource(sp<MediaSource> source); - - status_t setTimedTextTrackIndex(int32_t index); - status_t setParameter(int key, const Parcel &request); +protected: + virtual void onMessageReceived(const sp<AMessage> &msg); private: - enum TextType { - kNoText = 0, - kInBandText = 1, - kOutOfBandText = 2, + enum { + kWhatPause = 'paus', + kWhatSeek = 'seek', + kWhatSendSubtitle = 'send', + kWhatSetSource = 'ssrc', }; - Mutex mLock; - - sp<MediaSource> mSource; - sp<DataSource> mOutOfBandSource; - - bool mSeeking; - int64_t mSeekTimeUs; - - bool mStarted; - - sp<TimedEventQueue::Event> mTextEvent; - bool mTextEventPending; - - TimedEventQueue *mQueue; - - wp<MediaPlayerBase> mListener; - AwesomePlayer *mObserver; - - MediaBuffer *mTextBuffer; - Parcel mData; - - // for in-band timed text - 
Vector<sp<MediaSource> > mTextTrackVector; - - // for out-of-band timed text - struct OutOfBandText { - TimedTextParser::FileType type; - sp<DataSource> source; + // To add Parcel into an AMessage as an object, it should be 'RefBase'. + struct ParcelEvent : public RefBase { + Parcel parcel; }; - Vector<OutOfBandText > mTextOutOfBandVector; - sp<TimedTextParser> mTextParser; - AString mText; - - TextType mTextType; - - void reset(); + wp<MediaPlayerBase> mListener; + sp<TimedTextSource> mSource; + int32_t mSendSubtitleGeneration; + void doSeekAndRead(int64_t seekTimeUs); + void doRead(MediaSource::ReadOptions* options = NULL); void onTextEvent(); - void postTextEvent(int64_t delayUs = -1); - void cancelTextEvent(); - + void postTextEvent(const sp<ParcelEvent>& parcel = NULL, int64_t timeUs = -1); void notifyListener(int msg, const Parcel *parcel = NULL); - status_t extractAndAppendLocalDescriptions(int64_t timeUs); - status_t extractAndSendGlobalDescriptions(); - DISALLOW_EVIL_CONSTRUCTORS(TimedTextPlayer); }; diff --git a/media/libstagefright/timedtext/TimedTextParser.cpp b/media/libstagefright/timedtext/TimedTextSRTSource.cpp index 0bada16..3752d34 100644 --- a/media/libstagefright/timedtext/TimedTextParser.cpp +++ b/media/libstagefright/timedtext/TimedTextSRTSource.cpp @@ -1,5 +1,5 @@ -/* - * Copyright (C) 2011 The Android Open Source Project + /* + * Copyright (C) 2012 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,149 +14,126 @@ * limitations under the License. 
*/ -#include "TimedTextParser.h" +//#define LOG_NDEBUG 0 +#define LOG_TAG "TimedTextSRTSource" +#include <utils/Log.h> + +#include <binder/Parcel.h> +#include <media/stagefright/foundation/AString.h> #include <media/stagefright/DataSource.h> +#include <media/stagefright/MediaErrors.h> +#include <media/stagefright/MediaSource.h> + +#include "TimedTextSRTSource.h" +#include "TextDescriptions.h" namespace android { -TimedTextParser::TimedTextParser() - : mDataSource(NULL), - mOffset(0), - mIndex(0) { +TimedTextSRTSource::TimedTextSRTSource(const sp<DataSource>& dataSource) + : mSource(dataSource), + mIndex(0) { } -TimedTextParser::~TimedTextParser() { - reset(); +TimedTextSRTSource::~TimedTextSRTSource() { } -status_t TimedTextParser::init( - const sp<DataSource> &dataSource, FileType fileType) { - mDataSource = dataSource; - mFileType = fileType; - - status_t err; - if ((err = scanFile()) != OK) { +status_t TimedTextSRTSource::start() { + status_t err = scanFile(); + if (err != OK) { reset(); - return err; } - - return OK; + return err; } -void TimedTextParser::reset() { - mDataSource.clear(); +void TimedTextSRTSource::reset() { mTextVector.clear(); - mOffset = 0; mIndex = 0; } -// scan the text file to get start/stop time and the -// offset of each piece of text content -status_t TimedTextParser::scanFile() { - if (mFileType != OUT_OF_BAND_FILE_SRT) { - return ERROR_UNSUPPORTED; +status_t TimedTextSRTSource::stop() { + reset(); + return OK; +} + +status_t TimedTextSRTSource::read( + int64_t *timeUs, + Parcel *parcel, + const MediaSource::ReadOptions *options) { + int64_t endTimeUs; + AString text; + status_t err = getText(options, &text, timeUs, &endTimeUs); + if (err != OK) { + return err; + } + + if (*timeUs > 0) { + extractAndAppendLocalDescriptions(*timeUs, text, parcel); } + return OK; +} +status_t TimedTextSRTSource::scanFile() { off64_t offset = 0; int64_t startTimeUs; bool endOfFile = false; while (!endOfFile) { TextInfo info; - status_t err = 
getNextInSrtFileFormat(&offset, &startTimeUs, &info); - - if (err != OK) { - if (err == ERROR_END_OF_STREAM) { + status_t err = getNextSubtitleInfo(&offset, &startTimeUs, &info); + switch (err) { + case OK: + mTextVector.add(startTimeUs, info); + break; + case ERROR_END_OF_STREAM: endOfFile = true; - } else { + break; + default: return err; - } - } else { - mTextVector.add(startTimeUs, info); } } - if (mTextVector.isEmpty()) { return ERROR_MALFORMED; } return OK; } -// read one line started from *offset and store it into data. -status_t TimedTextParser::readNextLine(off64_t *offset, AString *data) { - char character; - - data->clear(); - - while (true) { - ssize_t err; - if ((err = mDataSource->readAt(*offset, &character, 1)) < 1) { - if (err == 0) { - return ERROR_END_OF_STREAM; - } - return ERROR_IO; - } - - (*offset) ++; - - // a line could end with CR, LF or CR + LF - if (character == 10) { - break; - } else if (character == 13) { - if ((err = mDataSource->readAt(*offset, &character, 1)) < 1) { - if (err == 0) { // end of the stream - return OK; - } - return ERROR_IO; - } - - (*offset) ++; - - if (character != 10) { - (*offset) --; - } - break; - } - - data->append(character); - } - - return OK; -} - /* SRT format: - * Subtitle number - * Start time --> End time - * Text of subtitle (one or more lines) - * Blank line + * Subtitle number + * Start time --> End time + * Text of subtitle (one or more lines) + * Blank lines * * .srt file example: - * 1 - * 00:00:20,000 --> 00:00:24,400 - * Altocumulus clouds occur between six thousand + * 1 + * 00:00:20,000 --> 00:00:24,400 + * Altocumulus clouds occr between six thousand * - * 2 - * 00:00:24,600 --> 00:00:27,800 - * and twenty thousand feet above ground level. + * 2 + * 00:00:24,600 --> 00:00:27,800 + * and twenty thousand feet above ground level. 
*/ -status_t TimedTextParser::getNextInSrtFileFormat( - off64_t *offset, int64_t *startTimeUs, TextInfo *info) { +status_t TimedTextSRTSource::getNextSubtitleInfo( + off64_t *offset, int64_t *startTimeUs, TextInfo *info) { AString data; status_t err; - if ((err = readNextLine(offset, &data)) != OK) { - return err; - } - // to skip the first line + // To skip blank lines. + do { + if ((err = readNextLine(offset, &data)) != OK) { + return err; + } + data.trim(); + } while (data.empty()); + + // Just ignore the first non-blank line which is subtitle sequence number. if ((err = readNextLine(offset, &data)) != OK) { return err; } - int hour1, hour2, min1, min2, sec1, sec2, msec1, msec2; // the start time format is: hours:minutes:seconds,milliseconds // 00:00:24,600 --> 00:00:27,800 if (sscanf(data.c_str(), "%02d:%02d:%02d,%03d --> %02d:%02d:%02d,%03d", - &hour1, &min1, &sec1, &msec1, &hour2, &min2, &sec2, &msec2) != 8) { + &hour1, &min1, &sec1, &msec1, &hour2, &min2, &sec2, &msec2) != 8) { return ERROR_MALFORMED; } @@ -167,7 +144,6 @@ status_t TimedTextParser::getNextInSrtFileFormat( } info->offset = *offset; - bool needMoreData = true; while (needMoreData) { if ((err = readNextLine(offset, &data)) != OK) { @@ -186,25 +162,56 @@ status_t TimedTextParser::getNextInSrtFileFormat( } } } - info->textLen = *offset - info->offset; - return OK; } -status_t TimedTextParser::getText( - AString *text, int64_t *startTimeUs, int64_t *endTimeUs, - const MediaSource::ReadOptions *options) { - Mutex::Autolock autoLock(mLock); +status_t TimedTextSRTSource::readNextLine(off64_t *offset, AString *data) { + data->clear(); + while (true) { + ssize_t readSize; + char character; + if ((readSize = mSource->readAt(*offset, &character, 1)) < 1) { + if (readSize == 0) { + return ERROR_END_OF_STREAM; + } + return ERROR_IO; + } - text->clear(); + (*offset)++; + + // a line could end with CR, LF or CR + LF + if (character == 10) { + break; + } else if (character == 13) { + if ((readSize = 
mSource->readAt(*offset, &character, 1)) < 1) { + if (readSize == 0) { // end of the stream + return OK; + } + return ERROR_IO; + } + (*offset)++; + if (character != 10) { + (*offset)--; + } + break; + } + data->append(character); + } + return OK; +} + +status_t TimedTextSRTSource::getText( + const MediaSource::ReadOptions *options, + AString *text, int64_t *startTimeUs, int64_t *endTimeUs) { + text->clear(); int64_t seekTimeUs; MediaSource::ReadOptions::SeekMode mode; - if (options && options->getSeekTo(&seekTimeUs, &mode)) { - int64_t lastEndTimeUs = mTextVector.valueAt(mTextVector.size() - 1).endTimeUs; + if (options != NULL && options->getSeekTo(&seekTimeUs, &mode)) { + int64_t lastEndTimeUs = + mTextVector.valueAt(mTextVector.size() - 1).endTimeUs; int64_t firstStartTimeUs = mTextVector.keyAt(0); - if (seekTimeUs < 0 || seekTimeUs > lastEndTimeUs) { return ERROR_OUT_OF_RANGE; } else if (seekTimeUs < firstStartTimeUs) { @@ -227,31 +234,42 @@ status_t TimedTextParser::getText( low = mid + 1; } else { if ((high == mid + 1) - && (seekTimeUs < mTextVector.keyAt(high))) { + && (seekTimeUs < mTextVector.keyAt(high))) { break; } high = mid - 1; } } - mIndex = mid; } } - - TextInfo info = mTextVector.valueAt(mIndex); + const TextInfo &info = mTextVector.valueAt(mIndex); *startTimeUs = mTextVector.keyAt(mIndex); *endTimeUs = info.endTimeUs; - mIndex ++; + mIndex++; char *str = new char[info.textLen]; - if (mDataSource->readAt(info.offset, str, info.textLen) < info.textLen) { + if (mSource->readAt(info.offset, str, info.textLen) < info.textLen) { delete[] str; return ERROR_IO; } - text->append(str, info.textLen); delete[] str; return OK; } +status_t TimedTextSRTSource::extractAndAppendLocalDescriptions( + int64_t timeUs, const AString &text, Parcel *parcel) { + const void *data = text.c_str(); + size_t size = text.size(); + int32_t flag = TextDescriptions::LOCAL_DESCRIPTIONS | + TextDescriptions::OUT_OF_BAND_TEXT_SRT; + + if (size > 0) { + return 
TextDescriptions::getParcelOfDescriptions( + (const uint8_t *)data, size, flag, timeUs / 1000, parcel); + } + return OK; +} + } // namespace android diff --git a/media/libstagefright/timedtext/TimedTextSRTSource.h b/media/libstagefright/timedtext/TimedTextSRTSource.h new file mode 100644 index 0000000..a0734d9 --- /dev/null +++ b/media/libstagefright/timedtext/TimedTextSRTSource.h @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TIMED_TEXT_SRT_SOURCE_H_ +#define TIMED_TEXT_SRT_SOURCE_H_ + +#include <media/stagefright/MediaErrors.h> +#include <media/stagefright/MediaSource.h> +#include <utils/Compat.h> // off64_t + +#include "TimedTextSource.h" + +namespace android { + +class AString; +class DataSource; +class MediaBuffer; +class Parcel; + +class TimedTextSRTSource : public TimedTextSource { + public: + TimedTextSRTSource(const sp<DataSource>& dataSource); + virtual status_t start(); + virtual status_t stop(); + virtual status_t read( + int64_t *timeUs, + Parcel *parcel, + const MediaSource::ReadOptions *options = NULL); + + protected: + virtual ~TimedTextSRTSource(); + + private: + sp<DataSource> mSource; + + struct TextInfo { + int64_t endTimeUs; + // The offset of the text in the original file. 
+ off64_t offset; + int textLen; + }; + + int mIndex; + KeyedVector<int64_t, TextInfo> mTextVector; + + void reset(); + status_t scanFile(); + status_t getNextSubtitleInfo( + off64_t *offset, int64_t *startTimeUs, TextInfo *info); + status_t readNextLine(off64_t *offset, AString *data); + status_t getText( + const MediaSource::ReadOptions *options, + AString *text, int64_t *startTimeUs, int64_t *endTimeUs); + status_t extractAndAppendLocalDescriptions( + int64_t timeUs, const AString &text, Parcel *parcel); + + DISALLOW_EVIL_CONSTRUCTORS(TimedTextSRTSource); +}; + +} // namespace android + +#endif // TIMED_TEXT_SRT_SOURCE_H_ diff --git a/media/libstagefright/timedtext/TimedTextSource.cpp b/media/libstagefright/timedtext/TimedTextSource.cpp new file mode 100644 index 0000000..ffbe1c3 --- /dev/null +++ b/media/libstagefright/timedtext/TimedTextSource.cpp @@ -0,0 +1,62 @@ + /* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +//#define LOG_NDEBUG 0 +#define LOG_TAG "TimedTextSource" +#include <utils/Log.h> + +#include <media/stagefright/foundation/ADebug.h> // CHECK_XX macro +#include <media/stagefright/DataSource.h> +#include <media/stagefright/MediaDefs.h> // for MEDIA_MIMETYPE_xxx +#include <media/stagefright/MediaSource.h> +#include <media/stagefright/MetaData.h> + +#include "TimedTextSource.h" + +#include "TimedText3GPPSource.h" +#include "TimedTextSRTSource.h" + +namespace android { + +// static +sp<TimedTextSource> TimedTextSource::CreateTimedTextSource( + const sp<MediaSource>& mediaSource) { + const char *mime; + CHECK(mediaSource->getFormat()->findCString(kKeyMIMEType, &mime)); + if (strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP) == 0) { + return new TimedText3GPPSource(mediaSource); + } + ALOGE("Unsupported mime type for subtitle. : %s", mime); + return NULL; +} + +// static +sp<TimedTextSource> TimedTextSource::CreateTimedTextSource( + const sp<DataSource>& dataSource, FileType filetype) { + switch(filetype) { + case OUT_OF_BAND_FILE_SRT: + return new TimedTextSRTSource(dataSource); + case OUT_OF_BAND_FILE_SMI: + // TODO: Implement for SMI. + ALOGE("Supporting SMI is not implemented yet"); + break; + default: + ALOGE("Undefined subtitle format. : %d", filetype); + } + return NULL; +} + +} // namespace android diff --git a/media/libstagefright/timedtext/TimedTextSource.h b/media/libstagefright/timedtext/TimedTextSource.h new file mode 100644 index 0000000..06bae71 --- /dev/null +++ b/media/libstagefright/timedtext/TimedTextSource.h @@ -0,0 +1,61 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef TIMED_TEXT_SOURCE_H_ +#define TIMED_TEXT_SOURCE_H_ + +#include <media/stagefright/foundation/ABase.h> // for DISALLOW_XXX macro. +#include <media/stagefright/MediaErrors.h> +#include <media/stagefright/MediaSource.h> // for MediaSource::ReadOptions +#include <utils/RefBase.h> + +namespace android { + +class DataSource; +class Parcel; + +class TimedTextSource : public RefBase { + public: + enum FileType { + OUT_OF_BAND_FILE_SRT = 1, + OUT_OF_BAND_FILE_SMI = 2, + }; + static sp<TimedTextSource> CreateTimedTextSource( + const sp<MediaSource>& source); + static sp<TimedTextSource> CreateTimedTextSource( + const sp<DataSource>& source, FileType filetype); + TimedTextSource() {} + virtual status_t start() = 0; + virtual status_t stop() = 0; + // Returns subtitle parcel and its start time. 
+ virtual status_t read( + int64_t *timeUs, + Parcel *parcel, + const MediaSource::ReadOptions *options = NULL) = 0; + virtual status_t extractGlobalDescriptions(Parcel *parcel) { + return INVALID_OPERATION; + } + + protected: + virtual ~TimedTextSource() { } + + private: + DISALLOW_EVIL_CONSTRUCTORS(TimedTextSource); +}; + +} // namespace android + +#endif // TIMED_TEXT_SOURCE_H_ diff --git a/media/libstagefright/yuv/YUVCanvas.cpp b/media/libstagefright/yuv/YUVCanvas.cpp index 38aa779..4c9fee8 100644 --- a/media/libstagefright/yuv/YUVCanvas.cpp +++ b/media/libstagefright/yuv/YUVCanvas.cpp @@ -17,7 +17,7 @@ #define LOG_NDEBUG 0 #define LOG_TAG "YUVCanvas" -#include <media/stagefright/MediaDebug.h> +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/YUVCanvas.h> #include <media/stagefright/YUVImage.h> #include <ui/Rect.h> diff --git a/media/libstagefright/yuv/YUVImage.cpp b/media/libstagefright/yuv/YUVImage.cpp index 0d67c96..7b9000b 100644 --- a/media/libstagefright/yuv/YUVImage.cpp +++ b/media/libstagefright/yuv/YUVImage.cpp @@ -17,9 +17,9 @@ #define LOG_NDEBUG 0 #define LOG_TAG "YUVImage" +#include <media/stagefright/foundation/ADebug.h> #include <media/stagefright/YUVImage.h> #include <ui/Rect.h> -#include <media/stagefright/MediaDebug.h> namespace android { diff --git a/media/mediaserver/main_mediaserver.cpp b/media/mediaserver/main_mediaserver.cpp index f078192..1520c01 100644 --- a/media/mediaserver/main_mediaserver.cpp +++ b/media/mediaserver/main_mediaserver.cpp @@ -15,10 +15,7 @@ ** limitations under the License. */ -// System headers required for setgroups, etc. 
-#include <sys/types.h> -#include <unistd.h> -#include <grp.h> +#define LOG_TAG "mediaserver" #include <binder/IPCThreadState.h> #include <binder/ProcessState.h> @@ -29,7 +26,6 @@ #include <CameraService.h> #include <MediaPlayerService.h> #include <AudioPolicyService.h> -#include <private/android_filesystem_config.h> using namespace android; diff --git a/media/mtp/Android.mk b/media/mtp/Android.mk index e590bab..fc7fc4f 100644 --- a/media/mtp/Android.mk +++ b/media/mtp/Android.mk @@ -39,6 +39,9 @@ LOCAL_MODULE:= libmtp LOCAL_CFLAGS := -DMTP_DEVICE -DMTP_HOST +# Needed for <bionic_time.h> +LOCAL_C_INCLUDES := bionic/libc/private + LOCAL_SHARED_LIBRARIES := libutils libcutils libusbhost libbinder include $(BUILD_SHARED_LIBRARY) |