diff options
61 files changed, 6000 insertions, 1738 deletions
diff --git a/include/hardware/activity_recognition.h b/include/hardware/activity_recognition.h new file mode 100644 index 0000000..6ae90b7 --- /dev/null +++ b/include/hardware/activity_recognition.h @@ -0,0 +1,202 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Activity Recognition HAL. The goal is to provide low power, low latency, always-on activity + * recognition implemented in hardware (i.e. these activity recognition algorithms/classifers + * should NOT be run on the AP). By low power we mean that this may be activated 24/7 without + * impacting the battery drain speed (goal in order of 1mW including the power for sensors). + * This HAL does not specify the input sources that are used towards detecting these activities. + * It has one monitor interface which can be used to batch activities for always-on + * activity_recognition and if the latency is zero, the same interface can be used for low latency + * detection. 
+ */ + +#ifndef ANDROID_ACTIVITY_RECOGNITION_INTERFACE_H +#define ANDROID_ACTIVITY_RECOGNITION_INTERFACE_H + +#include <hardware/hardware.h> + +__BEGIN_DECLS + +#define ACTIVITY_RECOGNITION_HEADER_VERSION 1 +#define ACTIVITY_RECOGNITION_API_VERSION_0_1 HARDWARE_DEVICE_API_VERSION_2(0, 1, ACTIVITY_RECOGNITION_HEADER_VERSION) + +#define ACTIVITY_RECOGNITION_HARDWARE_MODULE_ID "activity_recognition" +#define ACTIVITY_RECOGNITION_HARDWARE_INTERFACE "activity_recognition_hw_if" + +/* + * Define constants for various activity types. Multiple activities may be active at the same time + * and sometimes none of these activities may be active. + */ + +/* Reserved. get_supported_activities_list() should not return this activity. */ +#define ACTIVITY_RESERVED (0) + +#define ACTIVITY_IN_VEHICLE (1) + +#define ACTIVITY_ON_BICYCLE (2) + +#define ACTIVITY_WALKING (3) + +#define ACTIVITY_RUNNING (4) + +#define ACTIVITY_STILL (5) + +#define ACTIVITY_TILTING (6) + +/* Values for activity_event.event_types. */ +enum { + /* + * A flush_complete event which indicates that a flush() has been successfully completed. This + * does not correspond to any activity/event. An event of this type should be added to the end + * of a batch FIFO and it indicates that all the events in the batch FIFO have been successfully + * reported to the framework. An event of this type should be generated only if flush() has been + * explicitly called and if the FIFO is empty at the time flush() is called it should trivially + * return a flush_complete_event to indicate that the FIFO is empty. + * + * A flush complete event should have the following parameters set. + * activity_event_t.event_type = ACTIVITY_EVENT_TYPE_FLUSH_COMPLETE + * activity_event_t.activity = ACTIVITY_RESERVED + * activity_event_t.timestamp = 0 + * activity_event_t.reserved = 0 + * See (*flush)() for more details. + */ + ACTIVITY_EVENT_TYPE_FLUSH_COMPLETE = 0, + + /* Signifies entering an activity. 
*/ + ACTIVITY_EVENT_TYPE_ENTER = 1, + + /* Signifies exiting an activity. */ + ACTIVITY_EVENT_TYPE_EXIT = 2 +}; + +/* + * Each event is a separate activity with event_type indicating whether this activity has started + * or ended. Eg event: (event_type="enter", activity="ON_FOOT", timestamp) + */ +typedef struct activity_event { + /* One of the ACTIVITY_EVENT_TYPE_* constants defined above. */ + uint32_t event_type; + + /* One of ACTIVITY_* constants defined above. */ + uint32_t activity; + + /* Time at which the transition/event has occurred in nanoseconds using elapsedRealTimeNano. */ + int64_t timestamp; + + /* Set to zero. */ + int32_t reserved[4]; +} activity_event_t; + +typedef struct activity_recognition_module { + /** + * Common methods of the activity recognition module. This *must* be the first member of + * activity_recognition_module as users of this structure will cast a hw_module_t to + * activity_recognition_module pointer in contexts where it's known the hw_module_t + * references an activity_recognition_module. + */ + hw_module_t common; + + /* + * List of all activities supported by this module. Each activity is represented as an integer. + * Each value in the list is one of the ACTIVITY_* constants defined above. Return + * value is the size of this list. + */ + int (*get_supported_activities_list)(struct activity_recognition_module* module, + int** activity_list); +} activity_recognition_module_t; + +struct activity_recognition_device; + +typedef struct activity_recognition_callback_procs { + // Callback for activity_data. This is guaranteed to not invoke any HAL methods. + // Memory allocated for the events can be reused after this method returns. + // events - Array of activity_event_t s that are reported. + // count - size of the array. 
+ void (*activity_callback)(const struct activity_recognition_callback_procs* procs, + const activity_event_t* events, int count); +} activity_recognition_callback_procs_t; + +typedef struct activity_recognition_device { + /** + * Common methods of the activity recognition device. This *must* be the first member of + * activity_recognition_device as users of this structure will cast a hw_device_t to + * activity_recognition_device pointer in contexts where it's known the hw_device_t + * references an activity_recognition_device. + */ + hw_device_t common; + + /* + * Sets the callback to invoke when there are events to report. This call overwrites the + * previously registered callback (if any). + */ + void (*register_activity_callback)(const struct activity_recognition_device* dev, + const activity_recognition_callback_procs_t* callback); + + /* + * Activates monitoring of activity transitions. Activities need not be reported as soon as they + * are detected. The detected activities are stored in a FIFO and reported in batches when the + * "max_batch_report_latency" expires or when the batch FIFO is full. The implementation should + * allow the AP to go into suspend mode while the activities are detected and stored in the + * batch FIFO. Whenever events need to be reported (like when the FIFO is full or when the + * max_batch_report_latency has expired for an activity, event pair), it should wake_up the AP + * so that no events are lost. Activities are stored as transitions and they are allowed to + * overlap with each other. Each (activity, event_type) pair can be activated or deactivated + * independently of the other. The HAL implementation needs to keep track of which pairs are + * currently active and needs to detect only those pairs. + * + * activity - The specific activity that needs to be detected. + * event_type - Specific transition of the activity that needs to be detected. 
+ * max_batch_report_latency_ns - a transition can be delayed by at most + * “max_batch_report_latency” nanoseconds. + * Return 0 on success, negative errno code otherwise. + */ + int (*enable_activity_event)(const struct activity_recognition_device* dev, + uint32_t activity, uint32_t event_type, int64_t max_batch_report_latency_ns); + + /* + * Disables detection of a specific (activity, event_type) pair. + */ + int (*disable_activity_event)(const struct activity_recognition_device* dev, + uint32_t activity, uint32_t event_type); + + /* + * Flush all the batch FIFOs. Report all the activities that were stored in the FIFO so far as + * if max_batch_report_latency had expired. This shouldn't change the latency in any way. Add + * a flush_complete_event to indicate the end of the FIFO after all events are delivered. + * See ACTIVITY_EVENT_TYPE_FLUSH_COMPLETE for more details. + * Return 0 on success, negative errno code otherwise. + */ + int (*flush)(const struct activity_recognition_device* dev); + + // Must be set to NULL. 
+ void (*reserved_procs[16 - 4])(void); +} activity_recognition_device_t; + +static inline int activity_recognition_open(const hw_module_t* module, + activity_recognition_device_t** device) { + return module->methods->open(module, + ACTIVITY_RECOGNITION_HARDWARE_INTERFACE, (hw_device_t**)device); +} + +static inline int activity_recognition_close(activity_recognition_device_t* device) { + return device->common.close(&device->common); +} + +__END_DECLS + +#endif // ANDROID_ACTIVITY_RECOGNITION_INTERFACE_H diff --git a/include/hardware/audio.h b/include/hardware/audio.h index 6ba2544..d0648b7 100644 --- a/include/hardware/audio.h +++ b/include/hardware/audio.h @@ -54,7 +54,10 @@ __BEGIN_DECLS #define AUDIO_DEVICE_API_VERSION_0_0 HARDWARE_DEVICE_API_VERSION(0, 0) #define AUDIO_DEVICE_API_VERSION_1_0 HARDWARE_DEVICE_API_VERSION(1, 0) #define AUDIO_DEVICE_API_VERSION_2_0 HARDWARE_DEVICE_API_VERSION(2, 0) -#define AUDIO_DEVICE_API_VERSION_CURRENT AUDIO_DEVICE_API_VERSION_2_0 +#define AUDIO_DEVICE_API_VERSION_3_0 HARDWARE_DEVICE_API_VERSION(3, 0) +#define AUDIO_DEVICE_API_VERSION_CURRENT AUDIO_DEVICE_API_VERSION_3_0 +/* Minimal audio HAL version supported by the audio framework */ +#define AUDIO_DEVICE_API_VERSION_MIN AUDIO_DEVICE_API_VERSION_2_0 /** * List of known audio HAL modules. This is the base name of the audio HAL @@ -97,6 +100,9 @@ __BEGIN_DECLS /* Screen state */ #define AUDIO_PARAMETER_KEY_SCREEN_STATE "screen_state" +/* Bluetooth SCO wideband */ +#define AUDIO_PARAMETER_KEY_BT_SCO_WB "bt_wbs" + /** * audio stream parameters */ @@ -257,6 +263,11 @@ typedef enum { */ struct audio_stream_out { + /** + * Common methods of the audio stream out. This *must* be the first member of audio_stream_out + * as users of this structure will cast a audio_stream to audio_stream_out pointer in contexts + * where it's known the audio_stream references an audio_stream_out. 
+ */ struct audio_stream common; /** @@ -380,6 +391,11 @@ struct audio_stream_out { typedef struct audio_stream_out audio_stream_out_t; struct audio_stream_in { + /** + * Common methods of the audio stream in. This *must* be the first member of audio_stream_in + * as users of this structure will cast a audio_stream to audio_stream_in pointer in contexts + * where it's known the audio_stream references an audio_stream_in. + */ struct audio_stream common; /** set the input gain for the audio driver. This method is for @@ -436,6 +452,11 @@ struct audio_module { }; struct audio_hw_device { + /** + * Common methods of the audio device. This *must* be the first member of audio_hw_device + * as users of this structure will cast a hw_device_t to audio_hw_device pointer in contexts + * where it's known the hw_device_t references an audio_hw_device. + */ struct hw_device_t common; /** @@ -543,6 +564,38 @@ struct audio_hw_device { * method may leave it set to NULL. */ int (*get_master_mute)(struct audio_hw_device *dev, bool *mute); + + /** + * Routing control + */ + + /* Creates an audio patch between several source and sink ports. + * The handle is allocated by the HAL and should be unique for this + * audio HAL module. */ + int (*create_audio_patch)(struct audio_hw_device *dev, + unsigned int num_sources, + const struct audio_port_config *sources, + unsigned int num_sinks, + const struct audio_port_config *sinks, + audio_patch_handle_t *handle); + + /* Release an audio patch */ + int (*release_audio_patch)(struct audio_hw_device *dev, + audio_patch_handle_t handle); + + /* Fills the list of supported attributes for a given audio port. + * As input, "port" contains the information (type, role, address etc...) + * needed by the HAL to identify the port. + * As output, "port" contains possible attributes (sampling rates, formats, + * channel masks, gain controllers...) for this port. 
+ */ + int (*get_audio_port)(struct audio_hw_device *dev, + struct audio_port *port); + + /* Set audio port configuration */ + int (*set_audio_port_config)(struct audio_hw_device *dev, + const struct audio_port_config *config); + }; typedef struct audio_hw_device audio_hw_device_t; diff --git a/include/hardware/audio_alsaops.h b/include/hardware/audio_alsaops.h new file mode 100644 index 0000000..0d266ff --- /dev/null +++ b/include/hardware/audio_alsaops.h @@ -0,0 +1,102 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* This file contains shared utility functions to handle the tinyalsa + * implementation for Android internal audio, generally in the hardware layer. + * Some routines may log a fatal error on failure, as noted. + */ + +#ifndef ANDROID_AUDIO_ALSAOPS_H +#define ANDROID_AUDIO_ALSAOPS_H + +#include <cutils/log.h> +#include <system/audio.h> +#include <tinyalsa/asoundlib.h> + +__BEGIN_DECLS + +/* Converts audio_format to pcm_format. + * Parameters: + * format the audio_format_t to convert + * + * Logs a fatal error if format is not a valid convertible audio_format_t. 
+ */ +static inline enum pcm_format pcm_format_from_audio_format(audio_format_t format) +{ + switch (format) { +#ifdef HAVE_BIG_ENDIAN + case AUDIO_FORMAT_PCM_16_BIT: + return PCM_FORMAT_S16_BE; + case AUDIO_FORMAT_PCM_24_BIT_PACKED: + return PCM_FORMAT_S24_3BE; + case AUDIO_FORMAT_PCM_32_BIT: + return PCM_FORMAT_S32_BE; + case AUDIO_FORMAT_PCM_8_24_BIT: + return PCM_FORMAT_S24_BE; +#else + case AUDIO_FORMAT_PCM_16_BIT: + return PCM_FORMAT_S16_LE; + case AUDIO_FORMAT_PCM_24_BIT_PACKED: + return PCM_FORMAT_S24_3LE; + case AUDIO_FORMAT_PCM_32_BIT: + return PCM_FORMAT_S32_LE; + case AUDIO_FORMAT_PCM_8_24_BIT: + return PCM_FORMAT_S24_LE; +#endif + case AUDIO_FORMAT_PCM_FLOAT: /* there is no equivalent for float */ + default: + LOG_ALWAYS_FATAL("pcm_format_from_audio_format: invalid audio format %#x", format); + return 0; + } +} + +/* Converts pcm_format to audio_format. + * Parameters: + * format the pcm_format to convert + * + * Logs a fatal error if format is not a valid convertible pcm_format. 
+ */ +static inline audio_format_t audio_format_from_pcm_format(enum pcm_format format) +{ + switch (format) { +#ifdef HAVE_BIG_ENDIAN + case PCM_FORMAT_S16_BE: + return AUDIO_FORMAT_PCM_16_BIT; + case PCM_FORMAT_S24_3BE: + return AUDIO_FORMAT_PCM_24_BIT_PACKED; + case PCM_FORMAT_S24_BE: + return AUDIO_FORMAT_PCM_8_24_BIT; + case PCM_FORMAT_S32_BE: + return AUDIO_FORMAT_PCM_32_BIT; +#else + case PCM_FORMAT_S16_LE: + return AUDIO_FORMAT_PCM_16_BIT; + case PCM_FORMAT_S24_3LE: + return AUDIO_FORMAT_PCM_24_BIT_PACKED; + case PCM_FORMAT_S24_LE: + return AUDIO_FORMAT_PCM_8_24_BIT; + case PCM_FORMAT_S32_LE: + return AUDIO_FORMAT_PCM_32_BIT; +#endif + default: + LOG_ALWAYS_FATAL("audio_format_from_pcm_format: invalid pcm format %#x", format); + return 0; + } +} + +__END_DECLS + +#endif /* ANDROID_AUDIO_ALSAOPS_H */ diff --git a/include/hardware/audio_effect.h b/include/hardware/audio_effect.h index b49d02d..ee48e4c 100644 --- a/include/hardware/audio_effect.h +++ b/include/hardware/audio_effect.h @@ -815,7 +815,7 @@ typedef struct buffer_config_s { uint32_t samplingRate; // sampling rate uint32_t channels; // channel mask (see audio_channel_mask_t in audio.h) buffer_provider_t bufferProvider; // buffer provider - uint8_t format; // Audio format (see see audio_format_t in audio.h) + uint8_t format; // Audio format (see audio_format_t in audio.h) uint8_t accessMode; // read/write or accumulate in buffer (effect_buffer_access_e) uint16_t mask; // indicates which of the above fields is valid } buffer_config_t; diff --git a/include/hardware/audio_policy.h b/include/hardware/audio_policy.h index 4e75e02..99cb044 100644 --- a/include/hardware/audio_policy.h +++ b/include/hardware/audio_policy.h @@ -248,9 +248,6 @@ struct audio_policy { const audio_offload_info_t *info); }; -/* audio hw module handle used by load_hw_module(), open_output_on_module() - * and open_input_on_module() */ -typedef int audio_module_handle_t; struct audio_policy_service_ops { /* @@ -332,10 +329,9 @@ 
struct audio_policy_service_ops { audio_io_handle_t output, int delay_ms); - /* reroute a given stream type to the specified output */ - int (*set_stream_output)(void *service, - audio_stream_type_t stream, - audio_io_handle_t output); + /* invalidate a stream type, causing a reroute to an unspecified new output */ + int (*invalidate_stream)(void *service, + audio_stream_type_t stream); /* function enabling to send proprietary informations directly from audio * policy manager to audio hardware interface. */ @@ -424,6 +420,12 @@ typedef struct audio_policy_module { } audio_policy_module_t; struct audio_policy_device { + /** + * Common methods of the audio policy device. This *must* be the first member of + * audio_policy_device as users of this structure will cast a hw_device_t to + * audio_policy_device pointer in contexts where it's known the hw_device_t references an + * audio_policy_device. + */ struct hw_device_t common; int (*create_audio_policy)(const struct audio_policy_device *device, diff --git a/include/hardware/bluetooth.h b/include/hardware/bluetooth.h index 5e100ec..c00a8f7 100644 --- a/include/hardware/bluetooth.h +++ b/include/hardware/bluetooth.h @@ -324,10 +324,6 @@ typedef void (*le_test_mode_callback)(bt_status_t status, uint16_t num_packets); /** TODO: Add callbacks for Link Up/Down and other generic * notifications/callbacks */ -/** Wakelock callback */ -/* Called to take/release wakelock to allow timers to work (temporary kluge) */ -typedef void (*bt_wakelock_callback)(int acquire); - /** Bluetooth DM callback structure. */ typedef struct { /** set to sizeof(bt_callbacks_t) */ @@ -344,7 +340,6 @@ typedef struct { callback_thread_event thread_evt_cb; dut_mode_recv_callback dut_mode_recv_cb; le_test_mode_callback le_test_mode_cb; - bt_wakelock_callback bt_wakelock_cb; } bt_callbacks_t; /** NOTE: By default, no profiles are initialized at the time of init/enable. 
diff --git a/include/hardware/bt_gatt_client.h b/include/hardware/bt_gatt_client.h index 11b146d..d650671 100644 --- a/include/hardware/bt_gatt_client.h +++ b/include/hardware/bt_gatt_client.h @@ -159,6 +159,21 @@ typedef void (*listen_callback)(int status, int server_if); /** Callback invoked when the MTU for a given connection changes */ typedef void (*configure_mtu_callback)(int conn_id, int status, int mtu); +/** Callback invoked when a scan filter configuration command has completed */ +typedef void (*scan_filter_callback)(int action, int status); + +/** Callback invoked when multi-adv enable operation has completed */ +typedef void (*multi_adv_enable_callback)(int client_if, int status); + +/** Callback invoked when multi-adv param update operation has completed */ +typedef void (*multi_adv_update_callback)(int client_if, int status); + +/** Callback invoked when multi-adv instance data set operation has completed */ +typedef void (*multi_adv_data_callback)(int client_if, int status); + +/** Callback invoked when multi-adv disable operation has completed */ +typedef void (*multi_adv_disable_callback)(int client_if, int status); + typedef struct { register_client_callback register_client_cb; scan_result_callback scan_result_cb; @@ -179,6 +194,11 @@ typedef struct { read_remote_rssi_callback read_remote_rssi_cb; listen_callback listen_cb; configure_mtu_callback configure_mtu_cb; + scan_filter_callback scan_filter_cb; + multi_adv_enable_callback multi_adv_enable_cb; + multi_adv_update_callback multi_adv_update_cb; + multi_adv_data_callback multi_adv_data_cb; + multi_adv_disable_callback multi_adv_disable_cb; } btgatt_client_callbacks_t; /** Represents the standard BT-GATT client interface. 
*/ @@ -191,11 +211,11 @@ typedef struct { bt_status_t (*unregister_client)(int client_if ); /** Start or stop LE device scanning */ - bt_status_t (*scan)( int client_if, bool start ); + bt_status_t (*scan)( bool start ); /** Create a connection to a remote LE or dual-mode device */ bt_status_t (*connect)( int client_if, const bt_bdaddr_t *bd_addr, - bool is_direct ); + bool is_direct, int transport ); /** Disconnect a remote device or cancel a pending connection */ bt_status_t (*disconnect)( int client_if, const bt_bdaddr_t *bd_addr, @@ -276,11 +296,22 @@ typedef struct { /** Request RSSI for a given remote device */ bt_status_t (*read_remote_rssi)( int client_if, const bt_bdaddr_t *bd_addr); + /** Enable or disable scan filtering */ + bt_status_t (*scan_filter_enable)( int enable ); + + /** Configure a scan filter condition */ + bt_status_t (*scan_filter_add)(int type, int company_id, int company_mask, + int len, const bt_uuid_t *p_uuid, const bt_uuid_t *p_uuid_mask, + const bt_bdaddr_t *bd_addr, char addr_type, const char* p_value); + + /** Clear all scan filter conditions */ + bt_status_t (*scan_filter_clear)(); + /** Determine the type of the remote device (LE, BR/EDR, Dual-mode) */ int (*get_device_type)( const bt_bdaddr_t *bd_addr ); /** Set the advertising data or scan response data */ - bt_status_t (*set_adv_data)(int server_if, bool set_scan_rsp, bool include_name, + bt_status_t (*set_adv_data)(int client_if, bool set_scan_rsp, bool include_name, bool include_txpower, int min_interval, int max_interval, int appearance, uint16_t manufacturer_len, char* manufacturer_data, uint16_t service_data_len, char* service_data, @@ -289,8 +320,29 @@ typedef struct { /** Configure the MTU for a given connection */ bt_status_t (*configure_mtu)(int conn_id, int mtu); + /** Sets the LE scan interval and window in units of N*0.625 msec */ + bt_status_t (*set_scan_parameters)(int scan_interval, int scan_window); + + /* Setup the parameters as per spec, user manual specified 
values and enable multi ADV */ + bt_status_t (*multi_adv_enable)(int client_if, int min_interval,int max_interval,int adv_type, + int chnl_map, int tx_power); + + /* Update the parameters as per spec, user manual specified values and restart multi ADV */ + bt_status_t (*multi_adv_update)(int client_if, int min_interval,int max_interval,int adv_type, + int chnl_map, int tx_power); + + /* Setup the data for the specified instance */ + bt_status_t (*multi_adv_set_inst_data)(int client_if, bool set_scan_rsp, bool include_name, + bool include_txpower, int appearance, uint16_t manufacturer_len, + char* manufacturer_data, uint16_t service_data_len, char* service_data, + uint16_t service_uuid_len, char* service_uuid); + + /* Disable the multi adv instance */ + bt_status_t (*multi_adv_disable)(int client_if); + /** Test mode interface */ bt_status_t (*test_command)( int command, btgatt_test_params_t* params); + } btgatt_client_interface_t; __END_DECLS diff --git a/include/hardware/bt_gatt_server.h b/include/hardware/bt_gatt_server.h index 1a5a400..32f8ef6 100644 --- a/include/hardware/bt_gatt_server.h +++ b/include/hardware/bt_gatt_server.h @@ -129,7 +129,8 @@ typedef struct { bt_status_t (*unregister_server)(int server_if ); /** Create a connection to a remote peripheral */ - bt_status_t (*connect)(int server_if, const bt_bdaddr_t *bd_addr, bool is_direct ); + bt_status_t (*connect)(int server_if, const bt_bdaddr_t *bd_addr, + bool is_direct, int transport); /** Disconnect an established connection or cancel a pending one */ bt_status_t (*disconnect)(int server_if, const bt_bdaddr_t *bd_addr, @@ -168,6 +169,7 @@ typedef struct { /** Send a response to a read/write operation */ bt_status_t (*send_response)(int conn_id, int trans_id, int status, btgatt_response_t *response); + } btgatt_server_interface_t; __END_DECLS diff --git a/include/hardware/bt_gatt_types.h b/include/hardware/bt_gatt_types.h index 0ac217e..e037ddc 100644 --- a/include/hardware/bt_gatt_types.h +++ 
b/include/hardware/bt_gatt_types.h @@ -43,6 +43,14 @@ typedef struct uint8_t is_primary; } btgatt_srvc_id_t; +/** Preferred physical Transport for GATT connection */ +typedef enum +{ + GATT_TRANSPORT_AUTO, + GATT_TRANSPORT_BREDR, + GATT_TRANSPORT_LE +} btgatt_transport_t; + __END_DECLS #endif /* ANDROID_INCLUDE_BT_GATT_TYPES_H */ diff --git a/include/hardware/bt_hf.h b/include/hardware/bt_hf.h index 6135ac4..e015c28 100644 --- a/include/hardware/bt_hf.h +++ b/include/hardware/bt_hf.h @@ -79,65 +79,65 @@ typedef void (* bthf_audio_state_callback)(bthf_audio_state_t state, bt_bdaddr_t /** Callback for VR connection state change. * state will have one of the values from BtHfVRState */ -typedef void (* bthf_vr_cmd_callback)(bthf_vr_state_t state); +typedef void (* bthf_vr_cmd_callback)(bthf_vr_state_t state, bt_bdaddr_t *bd_addr); /** Callback for answer incoming call (ATA) */ -typedef void (* bthf_answer_call_cmd_callback)(); +typedef void (* bthf_answer_call_cmd_callback)(bt_bdaddr_t *bd_addr); /** Callback for disconnect call (AT+CHUP) */ -typedef void (* bthf_hangup_call_cmd_callback)(); +typedef void (* bthf_hangup_call_cmd_callback)(bt_bdaddr_t *bd_addr); /** Callback for disconnect call (AT+CHUP) * type will denote Speaker/Mic gain (BtHfVolumeControl). 
*/ -typedef void (* bthf_volume_cmd_callback)(bthf_volume_type_t type, int volume); +typedef void (* bthf_volume_cmd_callback)(bthf_volume_type_t type, int volume, bt_bdaddr_t *bd_addr); /** Callback for dialing an outgoing call * If number is NULL, redial */ -typedef void (* bthf_dial_call_cmd_callback)(char *number); +typedef void (* bthf_dial_call_cmd_callback)(char *number, bt_bdaddr_t *bd_addr); /** Callback for sending DTMF tones * tone contains the dtmf character to be sent */ -typedef void (* bthf_dtmf_cmd_callback)(char tone); +typedef void (* bthf_dtmf_cmd_callback)(char tone, bt_bdaddr_t *bd_addr); /** Callback for enabling/disabling noise reduction/echo cancellation * value will be 1 to enable, 0 to disable */ -typedef void (* bthf_nrec_cmd_callback)(bthf_nrec_t nrec); +typedef void (* bthf_nrec_cmd_callback)(bthf_nrec_t nrec, bt_bdaddr_t *bd_addr); /** Callback for call hold handling (AT+CHLD) * value will contain the call hold command (0, 1, 2, 3) */ -typedef void (* bthf_chld_cmd_callback)(bthf_chld_type_t chld); +typedef void (* bthf_chld_cmd_callback)(bthf_chld_type_t chld, bt_bdaddr_t *bd_addr); /** Callback for CNUM (subscriber number) */ -typedef void (* bthf_cnum_cmd_callback)(); +typedef void (* bthf_cnum_cmd_callback)(bt_bdaddr_t *bd_addr); /** Callback for indicators (CIND) */ -typedef void (* bthf_cind_cmd_callback)(); +typedef void (* bthf_cind_cmd_callback)(bt_bdaddr_t *bd_addr); /** Callback for operator selection (COPS) */ -typedef void (* bthf_cops_cmd_callback)(); +typedef void (* bthf_cops_cmd_callback)(bt_bdaddr_t *bd_addr); /** Callback for call list (AT+CLCC) */ -typedef void (* bthf_clcc_cmd_callback) (); +typedef void (* bthf_clcc_cmd_callback) (bt_bdaddr_t *bd_addr); /** Callback for unknown AT command recd from HF * at_string will contain the unparsed AT string */ -typedef void (* bthf_unknown_at_cmd_callback)(char *at_string); +typedef void (* bthf_unknown_at_cmd_callback)(char *at_string, bt_bdaddr_t *bd_addr); /** Callback 
for keypressed (HSP) event. */ -typedef void (* bthf_key_pressed_cmd_callback)(); +typedef void (* bthf_key_pressed_cmd_callback)(bt_bdaddr_t *bd_addr); /** BT-HF callback structure. */ typedef struct { @@ -213,7 +213,7 @@ typedef struct { /** * Register the BtHf callbacks */ - bt_status_t (*init)( bthf_callbacks_t* callbacks ); + bt_status_t (*init)( bthf_callbacks_t* callbacks, int max_hf_clients); /** connect to headset */ bt_status_t (*connect)( bt_bdaddr_t *bd_addr ); @@ -228,33 +228,33 @@ typedef struct { bt_status_t (*disconnect_audio)( bt_bdaddr_t *bd_addr ); /** start voice recognition */ - bt_status_t (*start_voice_recognition)(); + bt_status_t (*start_voice_recognition)( bt_bdaddr_t *bd_addr ); /** stop voice recognition */ - bt_status_t (*stop_voice_recognition)(); + bt_status_t (*stop_voice_recognition)( bt_bdaddr_t *bd_addr ); /** volume control */ - bt_status_t (*volume_control) (bthf_volume_type_t type, int volume); + bt_status_t (*volume_control) (bthf_volume_type_t type, int volume, bt_bdaddr_t *bd_addr ); /** Combined device status change notification */ bt_status_t (*device_status_notification)(bthf_network_state_t ntk_state, bthf_service_type_t svc_type, int signal, int batt_chg); /** Response for COPS command */ - bt_status_t (*cops_response)(const char *cops); + bt_status_t (*cops_response)(const char *cops, bt_bdaddr_t *bd_addr ); /** Response for CIND command */ bt_status_t (*cind_response)(int svc, int num_active, int num_held, bthf_call_state_t call_setup_state, - int signal, int roam, int batt_chg); + int signal, int roam, int batt_chg, bt_bdaddr_t *bd_addr ); /** Pre-formatted AT response, typically in response to unknown AT cmd */ - bt_status_t (*formatted_at_response)(const char *rsp); + bt_status_t (*formatted_at_response)(const char *rsp, bt_bdaddr_t *bd_addr ); /** ok/error response * ERROR (0) * OK (1) */ - bt_status_t (*at_response) (bthf_at_response_t response_code, int error_code); + bt_status_t (*at_response) 
(bthf_at_response_t response_code, int error_code, bt_bdaddr_t *bd_addr ); /** response for CLCC command * Can be iteratively called for each call index @@ -263,7 +263,7 @@ typedef struct { bt_status_t (*clcc_response) (int index, bthf_call_direction_t dir, bthf_call_state_t state, bthf_call_mode_t mode, bthf_call_mpty_type_t mpty, const char *number, - bthf_call_addrtype_t type); + bthf_call_addrtype_t type, bt_bdaddr_t *bd_addr ); /** notify of a call state change * Each update notifies diff --git a/include/hardware/camera3.h b/include/hardware/camera3.h index afc9d9f..b326e92 100644 --- a/include/hardware/camera3.h +++ b/include/hardware/camera3.h @@ -21,19 +21,25 @@ #include "camera_common.h" /** - * Camera device HAL 3.1 [ CAMERA_DEVICE_API_VERSION_3_1 ] + * Camera device HAL 3.2 [ CAMERA_DEVICE_API_VERSION_3_2 ] * * EXPERIMENTAL. * * Supports the android.hardware.Camera API. * * Camera devices that support this version of the HAL must return - * CAMERA_DEVICE_API_VERSION_3_1 in camera_device_t.common.version and in + * CAMERA_DEVICE_API_VERSION_3_2 in camera_device_t.common.version and in * camera_info_t.device_version (from camera_module_t.get_camera_info). * - * Camera modules that may contain version 3.1 devices must implement at least - * version 2.0 of the camera module interface (as defined by - * camera_module_t.common.module_api_version). + * CAMERA_DEVICE_API_VERSION_3_2: + * Camera modules that may contain version 3.2 devices must implement at + * least version 2.2 of the camera module interface (as defined by + * camera_module_t.common.module_api_version). + * + * <= CAMERA_DEVICE_API_VERSION_3_1: + * Camera modules that may contain version 3.1 (or 3.0) devices must + * implement at least version 2.0 of the camera module interface + * (as defined by camera_module_t.common.module_api_version). * * See camera_common.h for more versioning details. * @@ -44,6 +50,9 @@ * S4. 3A modes and state machines * S5. Cropping * S6. Error management + * S7. 
Key Performance Indicator (KPI) glossary + * S8. Sample Use Cases + * S9. Notes on Controls and Metadata */ /** @@ -88,6 +97,27 @@ * - configure_streams passes consumer usage flags to the HAL. * * - flush call to drop all in-flight requests/buffers as fast as possible. + * + * 3.2: Minor revision of expanded-capability HAL: + * + * - Deprecates get_metadata_vendor_tag_ops. Please use get_vendor_tag_ops + * in camera_common.h instead. + * + * - register_stream_buffers deprecated. All gralloc buffers provided + * by framework to HAL in process_capture_request may be new at any time. + * + * - add partial result support. process_capture_result may be called + * multiple times with a subset of the available result before the full + * result is available. + * + * - add manual template to camera3_request_template. The applications may + * use this template to control the capture settings directly. + * + * - Rework the bidirectional and input stream specifications. + * + * - change the input buffer return path. The buffer is returned in + * process_capture_result instead of process_capture_request. + * */ /** @@ -108,12 +138,19 @@ * 4. The framework calls camera3_device_t->ops->configure_streams() with a list * of input/output streams to the HAL device. * - * 5. The framework allocates gralloc buffers and calls + * 5. <= CAMERA_DEVICE_API_VERSION_3_1: + * + * The framework allocates gralloc buffers and calls * camera3_device_t->ops->register_stream_buffers() for at least one of the * output streams listed in configure_streams. The same stream is registered * only once. * - * 5. The framework requests default settings for some number of use cases with + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * camera3_device_t->ops->register_stream_buffers() is not called and must + * be NULL. + * + * 6. The framework requests default settings for some number of use cases with * calls to camera3_device_t->ops->construct_default_request_settings(). This * may occur any time after step 3. 
* @@ -124,23 +161,64 @@ * camera3_device_t->ops->process_capture_request(). The HAL must block the * return of this call until it is ready for the next request to be sent. * - * 8. The framework continues to submit requests, and possibly call - * register_stream_buffers() for not-yet-registered streams, and call + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * The buffer_handle_t provided in the camera3_stream_buffer_t array + * in the camera3_capture_request_t may be new and never-before-seen + * by the HAL on any given new request. + * + * 8. The framework continues to submit requests, and call * construct_default_request_settings to get default settings buffers for * other use cases. * + * <= CAMERA_DEVICE_API_VERSION_3_1: + * + * The framework may call register_stream_buffers() at this time for + * not-yet-registered streams. + * * 9. When the capture of a request begins (sensor starts exposing for the * capture), the HAL calls camera3_callback_ops_t->notify() with the SHUTTER * event, including the frame number and the timestamp for start of exposure. + * + * <= CAMERA_DEVICE_API_VERSION_3_1: + * * This notify call must be made before the first call to * process_capture_result() for that frame number. * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * The camera3_callback_ops_t->notify() call with the SHUTTER event should + * be made as early as possible since the framework will be unable to + * deliver gralloc buffers to the application layer (for that frame) until + * it has a valid timestamp for the start of exposure. + * + * Both partial metadata results and the gralloc buffers may be sent to the + * framework at any time before or after the SHUTTER event. + * * 10. After some pipeline delay, the HAL begins to return completed captures to * the framework with camera3_callback_ops_t->process_capture_result(). These * are returned in the same order as the requests were submitted. 
Multiple * requests can be in flight at once, depending on the pipeline depth of the * camera HAL device. * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * Once a buffer is returned by process_capture_result as part of the + * camera3_stream_buffer_t array, and the fence specified by release_fence + * has been signaled (this is a no-op for -1 fences), the ownership of that + * buffer is considered to be transferred back to the framework. After that, + * the HAL must no longer retain that particular buffer, and the + * framework may clean up the memory for it immediately. + * + * process_capture_result may be called multiple times for a single frame, + * each time with a new disjoint piece of metadata and/or set of gralloc + * buffers. The framework will accumulate these partial metadata results + * into one result. + * + * In particular, it is legal for a process_capture_result to be called + * simultaneously for both a frame N and a frame N+1 as long as the + * above rule holds for gralloc buffers (both input and output). + * * 11. After some time, the framework may stop submitting new requests, wait for * the existing captures to complete (all buffers filled, all results * returned), and then call configure_streams() again. 
This resets the camera @@ -276,13 +354,10 @@ * * android.scaler.cropRegion (controls) * [ignores (x,y), assumes center-zoom] - * android.scaler.availableFormats (static) - * [RAW not supported] - * android.scaler.availableJpegMinDurations (static) - * android.scaler.availableJpegSizes (static) + * android.scaler.availableStreamConfigurations (static) + * android.scaler.availableMinFrameDurations (static) + * android.scaler.availableStallDurations (static) * android.scaler.availableMaxDigitalZoom (static) - * android.scaler.availableProcessedMinDurations (static) - * android.scaler.availableProcessedSizes (static) * [full resolution not supported] * android.scaler.maxDigitalZoom (static) * android.scaler.cropRegion (dynamic) @@ -816,7 +891,12 @@ * view it is receiving based on the crop region, the dimensions of the image * sensor, and the lens focal length. * - * Since the crop region applies to all streams, which may have different aspect + * It is assumed that the cropping is applied after raw to other color space + * conversion. Raw streams (RAW16 and RAW_OPAQUE) don't have this conversion stage, + * and are not croppable. Therefore, the crop region must be ignored by the HAL + * for raw streams. + * + * Since the crop region applies to all non-raw streams, which may have different aspect * ratios than the crop region, the exact sensor region used for each stream may * be smaller than the crop region. Specifically, each stream should maintain * square pixels and its aspect ratio by minimally further cropping the defined @@ -963,15 +1043,134 @@ * ERROR_BUFFER for each failed buffer. * * In each of these transient failure cases, the HAL must still call - * process_capture_result, with valid output buffer_handle_t. If the result - * metadata could not be produced, it should be NULL. If some buffers could not - * be filled, their sync fences must be set to the error state. 
+ * process_capture_result, with valid output and input (if an input buffer was + * submitted) buffer_handle_t. If the result metadata could not be produced, it + * should be NULL. If some buffers could not be filled, they must be returned with + * process_capture_result in the error state, their release fences must be set to + * the acquire fences passed by the framework, or -1 if they have been waited on by + * the HAL already. * * Invalid input arguments result in -EINVAL from the appropriate methods. In * that case, the framework must act as if that call had never been made. * */ +/** + * S7. Key Performance Indicator (KPI) glossary: + * + * This includes some critical definitions that are used by KPI metrics. + * + * Pipeline Latency: + * For a given capture request, the duration from the framework calling + * process_capture_request to the HAL sending capture result and all buffers + * back by process_capture_result call. To make the Pipeline Latency measure + * independent of frame rate, it is measured by frame count. + * + * For example, when frame rate is 30 (fps), the frame duration (time interval + * between adjacent frame capture time) is 33 (ms). + * If it takes 5 frames for framework to get the result and buffers back for + * a given request, then the Pipeline Latency is 5 (frames), instead of + * 5 x 33 = 165 (ms). + * + * The Pipeline Latency is determined by android.request.pipelineDepth and + * android.request.pipelineMaxDepth, see their definitions for more details. + * + */ + +/** + * S8. Sample Use Cases: + * + * This includes some typical use case examples the camera HAL may support. + * + * S8.1 Zero Shutter Lag (ZSL) with CAMERA3_STREAM_INPUT stream. + * + * When Zero Shutter Lag (ZSL) is supported by the camera device, the INPUT stream + * can be used for application/framework implemented ZSL use case. This kind of stream + * will be used by the framework as follows: + * + * 1. 
Framework configures an opaque raw format output stream that is used to + * produce the ZSL output buffers. The stream pixel format will be + * HAL_PIXEL_FORMAT_RAW_OPAQUE. + * + * 2. Framework configures an opaque raw format input stream that is used to + * send the reprocess ZSL buffers to the HAL. The stream pixel format will + * also be HAL_PIXEL_FORMAT_RAW_OPAQUE. + * + * 3. Framework configures a YUV/JPEG output stream that is used to receive the + * reprocessed data. The stream pixel format will be YCbCr_420/HAL_PIXEL_FORMAT_BLOB. + * + * 4. Framework picks a ZSL buffer from the output stream when a ZSL capture is + * issued by the application, and sends the data back as an input buffer in a + * reprocessing request, then sends to the HAL for reprocessing. + * + * 5. The HAL sends back the output JPEG result to framework. + * + * The HAL can select the actual raw buffer format and configure the ISP pipeline + * appropriately based on the HAL_PIXEL_FORMAT_RAW_OPAQUE format. See this format + * definition for more details. + * + * S8.2 Zero Shutter Lag (ZSL) with CAMERA3_STREAM_BIDIRECTIONAL stream. + * + * For this use case, the bidirectional stream will be used by the framework as follows: + * + * 1. The framework includes a buffer from this stream as output buffer in a + * request as normal. + * + * 2. Once the HAL device returns a filled output buffer to the framework, + * the framework may do one of two things with the filled buffer: + * + * 2. a. The framework uses the filled data, and returns the now-used buffer + * to the stream queue for reuse. This behavior exactly matches the + * OUTPUT type of stream. + * + * 2. b. The framework wants to reprocess the filled data, and uses the + * buffer as an input buffer for a request. Once the HAL device has + * used the reprocessing buffer, it then returns it to the + * framework. The framework then returns the now-used buffer to the + * stream queue for reuse. + * + * 3. 
The HAL device will be given the buffer again as an output buffer for + * a request at some future point. + * + * For ZSL use case, the pixel format for bidirectional stream will be + * HAL_PIXEL_FORMAT_RAW_OPAQUE if it is listed in + * android.scaler.availableInputOutputFormatsMap. A configuration stream list + * that has BIDIRECTIONAL stream used as input, will usually also have a + * distinct OUTPUT stream to get the reprocessing data. For example, for the + * ZSL use case, the stream list might be configured with the following: + * + * - A HAL_PIXEL_FORMAT_RAW_OPAQUE bidirectional stream is used + * as input. + * - And a HAL_PIXEL_FORMAT_BLOB (JPEG) output stream. + * + */ + +/** + * S9. Notes on Controls and Metadata + * + * This section contains notes about the interpretation and usage of various metadata tags. + * + * S9.1 HIGH_QUALITY and FAST modes. + * + * Many camera post-processing blocks may be listed as having HIGH_QUALITY, + * FAST, and OFF operating modes. These blocks will typically also have an + * 'available modes' tag representing which of these operating modes are + * available on a given device. The general policy regarding implementing + * these modes is as follows: + * + * 1. Operating mode controls of hardware blocks that cannot be disabled + * must not list OFF in their corresponding 'available modes' tags. + * + * 2. OFF will always be included in their corresponding 'available modes' + * tag if it is possible to disable that hardware block. + * + * 3. FAST must always be included in the 'available modes' tags for all + * post-processing blocks supported on the device. If a post-processing + * block also has a slower and higher quality operating mode that does + * not meet the framerate requirements for FAST mode, HIGH_QUALITY should + * be included in the 'available modes' tag to represent this operating + * mode. 
+ */ __BEGIN_DECLS struct camera3_device; @@ -1006,6 +1205,21 @@ typedef enum camera3_stream_type { * for reading buffers from this stream and sending them through the camera * processing pipeline, as if the buffer was a newly captured image from the * imager. + * + * The pixel format for input stream can be any format reported by + * android.scaler.availableInputOutputFormatsMap. The pixel format of the + * output stream that is used to produce the reprocessing data may be any + * format reported by android.scaler.availableStreamConfigurations. The + * supported input/output stream combinations depends the camera device + * capabilities, see android.scaler.availableInputOutputFormatsMap for + * stream map details. + * + * This kind of stream is generally used to reprocess data into higher + * quality images (that otherwise would cause a frame rate performance + * loss), or to do off-line reprocessing. + * + * A typical use case is Zero Shutter Lag (ZSL), see S8.1 for more details. + * */ CAMERA3_STREAM_INPUT = 1, @@ -1014,29 +1228,9 @@ typedef enum camera3_stream_type { * used as an output stream, but occasionally one already-filled buffer may * be sent back to the HAL device for reprocessing. * - * This kind of stream is meant generally for zero-shutter-lag features, - * where copying the captured image from the output buffer to the - * reprocessing input buffer would be expensive. The stream will be used by - * the framework as follows: - * - * 1. The framework includes a buffer from this stream as output buffer in a - * request as normal. - * - * 2. Once the HAL device returns a filled output buffer to the framework, - * the framework may do one of two things with the filled buffer: - * - * 2. a. The framework uses the filled data, and returns the now-used buffer - * to the stream queue for reuse. This behavior exactly matches the - * OUTPUT type of stream. - * - * 2. b. 
The framework wants to reprocess the filled data, and uses the - * buffer as an input buffer for a request. Once the HAL device has - * used the reprocessing buffer, it then returns it to the - * framework. The framework then returns the now-used buffer to the - * stream queue for reuse. - * - * 3. The HAL device will be given the buffer again as an output buffer for - * a request at some future point. + * This kind of stream is meant generally for Zero Shutter Lag (ZSL) + * features, where copying the captured image from the output buffer to the + * reprocessing input buffer would be expensive. See S8.2 for more details. * * Note that the HAL will always be reprocessing data it produced. * @@ -1105,9 +1299,17 @@ typedef struct camera3_stream { * gralloc module will select a format based on the usage flags provided by * the camera device and the other endpoint of the stream. * + * <= CAMERA_DEVICE_API_VERSION_3_1: + * * The camera HAL device must inspect the buffers handed to it in the * subsequent register_stream_buffers() call to obtain the * implementation-specific format details, if necessary. + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * register_stream_buffers() won't be called by the framework, so the HAL + * should configure the ISP and sensor pipeline based purely on the sizes, + * usage flags, and formats for the configured streams. */ int format; @@ -1257,6 +1459,14 @@ typedef struct camera3_stream_buffer { * * For input buffers, the HAL must not change the acquire_fence field during * the process_capture_request() call. + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * When the HAL returns an input buffer to the framework with + * process_capture_result(), the acquire_fence must be set to -1. If the HAL + * never waits on input buffer acquire fence due to an error, the sync + * fences should be handled similarly to the way they are handled for output + * buffers. 
*/ int acquire_fence; @@ -1265,10 +1475,25 @@ typedef struct camera3_stream_buffer { * returning buffers to the framework, or write -1 to indicate that no * waiting is required for this buffer. * + * For the output buffers, the fences must be set in the output_buffers + * array passed to process_capture_result(). + * + * <= CAMERA_DEVICE_API_VERSION_3_1: + * * For the input buffer, the release fence must be set by the - * process_capture_request() call. For the output buffers, the fences must - * be set in the output_buffers array passed to process_capture_result(). + * process_capture_request() call. + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * For the input buffer, the fences must be set in the input_buffer + * passed to process_capture_result(). * + * After signaling the release_fence for this buffer, the HAL + * should not make any further attempts to access this buffer as the + * ownership has been fully transferred back to the framework. + * + * If a fence of -1 was specified then the ownership of this buffer + * is transferred back immediately upon the call of process_capture_result. */ int release_fence; @@ -1280,6 +1505,12 @@ typedef struct camera3_stream_buffer { * The complete set of gralloc buffers for a stream. This structure is given to * register_stream_buffers() to allow the camera HAL device to register/map/etc * newly allocated stream buffers. + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * Deprecated (and not used). In particular, + * register_stream_buffers is also deprecated and will never be invoked. + * */ typedef struct camera3_stream_buffer_set { /** @@ -1309,17 +1540,18 @@ typedef struct camera3_stream_buffer_set { * Transport header for compressed JPEG buffers in output streams. * * To capture JPEG images, a stream is created using the pixel format - * HAL_PIXEL_FORMAT_BLOB, and the static metadata field android.jpeg.maxSize is - * used as the buffer size. 
Since compressed JPEG images are of variable size, - * the HAL needs to include the final size of the compressed image using this - * structure inside the output stream buffer. The JPEG blob ID field must be set - * to CAMERA3_JPEG_BLOB_ID. - * - * Transport header should be at the end of the JPEG output stream buffer. That - * means the jpeg_blob_id must start at byte[android.jpeg.maxSize - - * sizeof(camera3_jpeg_blob)]. Any HAL using this transport header must - * account for it in android.jpeg.maxSize. The JPEG data itself starts at - * the beginning of the buffer and should be jpeg_size bytes long. + * HAL_PIXEL_FORMAT_BLOB. The buffer size for the stream is calculated by the + * framework, based on the static metadata field android.jpeg.maxSize. Since + * compressed JPEG images are of variable size, the HAL needs to include the + * final size of the compressed image using this structure inside the output + * stream buffer. The JPEG blob ID field must be set to CAMERA3_JPEG_BLOB_ID. + * + * Transport header should be at the end of the JPEG output stream buffer. That + * means the jpeg_blob_id must start at byte[buffer_size - + * sizeof(camera3_jpeg_blob)], where the buffer_size is the size of the gralloc buffer. + * Any HAL using this transport header must account for it in android.jpeg.maxSize. + * The JPEG data itself starts at the beginning of the buffer and should be + * jpeg_size bytes long. */ typedef struct camera3_jpeg_blob { uint16_t jpeg_blob_id; @@ -1534,6 +1766,16 @@ typedef enum camera3_request_template { */ CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG = 5, + /** + * A basic template for direct application control of capture + * parameters. All automatic control is disabled (auto-exposure, auto-white + * balance, auto-focus), and post-processing parameters are set to preview + * quality. The manual capture parameters (exposure, sensitivity, etc.)
+ * are set to reasonable defaults, but should be overridden by the + * application depending on the intended use case. + */ + CAMERA3_TEMPLATE_MANUAL = 6, + /* Total number of templates */ CAMERA3_TEMPLATE_COUNT, @@ -1592,8 +1834,15 @@ typedef struct camera3_capture_request { * The HAL is required to wait on the acquire sync fence of the input buffer * before accessing it. * + * <= CAMERA_DEVICE_API_VERSION_3_1: + * * Any input buffer included here will have been registered with the HAL * through register_stream_buffers() before its inclusion in a request. + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * The buffers will not have been pre-registered with the HAL. + * Subsequent requests may reuse buffers, or provide entirely new buffers. */ camera3_stream_buffer_t *input_buffer; @@ -1606,13 +1855,21 @@ typedef struct camera3_capture_request { /** * An array of num_output_buffers stream buffers, to be filled with image * data from this capture/reprocess. The HAL must wait on the acquire fences - * of each stream buffer before writing to them. All the buffers included - * here will have been registered with the HAL through - * register_stream_buffers() before their inclusion in a request. + * of each stream buffer before writing to them. * * The HAL takes ownership of the actual buffer_handle_t entries in * output_buffers; the framework does not access them until they are * returned in a camera3_capture_result_t. + * + * <= CAMERA_DEVICE_API_VERSION_3_1: + * + * All the buffers included here will have been registered with the HAL + * through register_stream_buffers() before their inclusion in a request. + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * Any or all of the buffers included here may be brand new in this + * request (having never before been seen by the HAL).
*/ const camera3_stream_buffer_t *output_buffers; @@ -1625,7 +1882,9 @@ typedef struct camera3_capture_request { * sent to the framework asynchronously with process_capture_result(), in * response to a single capture request sent to the HAL with * process_capture_request(). Multiple process_capture_result() calls may be - * performed by the HAL for each request. Each call, all with the same frame + * performed by the HAL for each request. + * + * Each call, all with the same frame * number, may contain some subset of the output buffers, and/or the result * metadata. The metadata may only be provided once for a given frame number; * all other calls must set the result metadata to NULL. @@ -1635,6 +1894,29 @@ typedef struct camera3_capture_request { * output buffer may come with a release sync fence that the framework will wait * on before reading, in case the buffer has not yet been filled by the HAL. * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * The metadata may be provided multiple times for a single frame number. The + * framework will accumulate together the final result set by combining each + * partial result together into the total result set. + * + * If an input buffer is given in a request, the HAL must return it in one of + * the process_capture_result calls, and the call may be to just return the input + * buffer, without metadata and output buffers; the sync fences must be handled + * the same way they are done for output buffers. + * + * + * Performance considerations: + * + * Applications will also receive these partial results immediately, so sending + * partial results is a highly recommended performance optimization to avoid + * the total pipeline latency before sending the results for what is known very + * early on in the pipeline. 
+ * + * A typical use case might be calculating the AF state halfway through the + * pipeline; by sending the state back to the framework immediately, we get a + * 50% performance increase and perceived responsiveness of the auto-focus. + * */ typedef struct camera3_capture_result { /** @@ -1657,6 +1939,18 @@ typedef struct camera3_capture_result { * * If there was an error producing the result metadata, result must be an * empty metadata buffer, and notify() must be called with ERROR_RESULT. + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * Multiple calls to process_capture_result() with a given frame_number + * may include the result metadata. + * + * Partial metadata submitted should not include any metadata key returned + * in a previous partial result for a given frame. Each new partial result + * for that frame must also set a distinct partial_result value. + * + * If notify has been called with ERROR_RESULT, all further partial + * results for that frame are ignored by the framework. */ const camera_metadata_t *result; @@ -1690,9 +1984,71 @@ typedef struct camera3_capture_result { * num_output_buffers is zero, this may be NULL. In that case, at least one * more process_capture_result call must be made by the HAL to provide the * output buffers. + * + * When process_capture_result is called with a new buffer for a frame, + * all previous frames' buffers for that corresponding stream must have been + * already delivered (the fences need not have yet been signaled). + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * Gralloc buffers for a frame may be sent to framework before the + * corresponding SHUTTER-notify. + * + * Performance considerations: + * + * Buffers delivered to the framework will not be dispatched to the + * application layer until a start of exposure timestamp has been received + * via a SHUTTER notify() call. It is highly recommended to + * dispatch that call as early as possible. 
*/ const camera3_stream_buffer_t *output_buffers; + /** + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * The handle for the input stream buffer for this capture. It may not + * yet be consumed at the time the HAL calls process_capture_result(); the + * framework will wait on the release sync fences provided by the HAL before + * reusing the buffer. + * + * The HAL should handle the sync fences the same way they are done for + * output_buffers. + * + * Only one input buffer is allowed to be sent per request. Similarly to + * output buffers, the ordering of returned input buffers must be + * maintained by the HAL. + * + * Performance considerations: + * + * The input buffer should be returned as early as possible. If the HAL + * supports sync fences, it can call process_capture_result to hand it back + * with sync fences being set appropriately. If the sync fences are not + * supported, the buffer can only be returned when it is consumed, which + * may take a long time; the HAL may choose to copy this input buffer to make + * the buffer return sooner. + */ + const camera3_stream_buffer_t *input_buffer; + + /** + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * In order to take advantage of partial results, the HAL must set the + * static metadata android.request.partialResultCount to the number of + * partial results it will send for each frame. + * + * Each new capture result with a partial result must set + * this field (partial_result) to a distinct inclusive value between + * 1 and android.request.partialResultCount. + * + * HALs not wishing to take advantage of this feature must not + * set an android.request.partialResultCount or partial_result to a value + * other than 1. + * + * This value must be set to 0 when a capture result contains buffers only + * and no metadata.
+ */ + uint32_t partial_result; + } camera3_capture_result_t; /********************************************************************** @@ -1768,6 +2124,13 @@ typedef struct camera3_callback_ops { * message. In this case, individual ERROR_RESULT/ERROR_BUFFER messages * should not be sent. * + * Performance requirements: + * + * This is a non-blocking call. The framework will return this call in 5ms. + * + * The pipeline latency (see S7 for definition) should be less than or equal to + * 4 frame intervals, and must be less than or equal to 8 frame intervals. + * */ void (*process_capture_result)(const struct camera3_callback_ops *, const camera3_capture_result_t *result); @@ -1781,11 +2144,25 @@ typedef struct camera3_callback_ops { * with the HAL, and the msg only needs to be valid for the duration of this * call. * + * Multiple threads may call notify() simultaneously. + * + * <= CAMERA_DEVICE_API_VERSION_3_1: + * * The notification for the start of exposure for a given request must be * sent by the HAL before the first call to process_capture_result() for * that request is made. * - * Multiple threads may call notify() simultaneously. + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * Buffers delivered to the framework will not be dispatched to the + * application layer until a start of exposure timestamp has been received + * via a SHUTTER notify() call. It is highly recommended to + * dispatch this call as early as possible. + * + * ------------------------------------------------------------------------ + * Performance requirements: + * + * This is a non-blocking call. The framework will return this call in 5ms. */ void (*notify)(const struct camera3_callback_ops *, const camera3_notify_msg_t *msg); @@ -1806,6 +2183,11 @@ typedef struct camera3_device_ops { * the HAL. Will be called once after a successful open() call, before any * other functions are called on the camera3_device_ops structure. 
* + * Performance requirements: + * + * This should be a non-blocking call. The HAL should return from this call + * in 5ms, and must return from this call in 10ms. + * * Return values: * * 0: On successful initialization @@ -1823,6 +2205,8 @@ typedef struct camera3_device_ops { /** * configure_streams: * + * CAMERA_DEVICE_API_VERSION_3_0 only: + * * Reset the HAL camera device processing pipeline and set up new input and * output streams. This call replaces any existing stream configuration with * the streams defined in the stream_list. This method will be called at @@ -1835,16 +2219,19 @@ typedef struct camera3_device_ops { * The stream_list may contain streams that are also in the currently-active * set of streams (from the previous call to configure_stream()). These * streams will already have valid values for usage, max_buffers, and the - * private pointer. If such a stream has already had its buffers registered, + * private pointer. + * + * If such a stream has already had its buffers registered, * register_stream_buffers() will not be called again for the stream, and * buffers from the stream can be immediately included in input requests. * * If the HAL needs to change the stream configuration for an existing * stream due to the new configuration, it may rewrite the values of usage - * and/or max_buffers during the configure call. The framework will detect - * such a change, and will then reallocate the stream buffers, and call - * register_stream_buffers() again before using buffers from that stream in - * a request. + * and/or max_buffers during the configure call. + * + * The framework will detect such a change, and will then reallocate the + * stream buffers, and call register_stream_buffers() again before using + * buffers from that stream in a request. * * If a currently-active stream is not included in stream_list, the HAL may * safely remove any references to that stream. 
It will not be reused in a @@ -1873,6 +2260,115 @@ typedef struct camera3_device_ops { * of (for example) a preview stream, with allocation for other streams * happening later or concurrently. * + * ------------------------------------------------------------------------ + * CAMERA_DEVICE_API_VERSION_3_1 only: + * + * Reset the HAL camera device processing pipeline and set up new input and + * output streams. This call replaces any existing stream configuration with + * the streams defined in the stream_list. This method will be called at + * least once after initialize() before a request is submitted with + * process_capture_request(). + * + * The stream_list must contain at least one output-capable stream, and may + * not contain more than one input-capable stream. + * + * The stream_list may contain streams that are also in the currently-active + * set of streams (from the previous call to configure_stream()). These + * streams will already have valid values for usage, max_buffers, and the + * private pointer. + * + * If such a stream has already had its buffers registered, + * register_stream_buffers() will not be called again for the stream, and + * buffers from the stream can be immediately included in input requests. + * + * If the HAL needs to change the stream configuration for an existing + * stream due to the new configuration, it may rewrite the values of usage + * and/or max_buffers during the configure call. + * + * The framework will detect such a change, and will then reallocate the + * stream buffers, and call register_stream_buffers() again before using + * buffers from that stream in a request. + * + * If a currently-active stream is not included in stream_list, the HAL may + * safely remove any references to that stream. It will not be reused in a + * later configure() call by the framework, and all the gralloc buffers for + * it will be freed after the configure_streams() call returns. 
+ * + * The stream_list structure is owned by the framework, and may not be + * accessed once this call completes. The address of an individual + * camera3_stream_t structure will remain valid for access by the HAL until + * the end of the first configure_stream() call which no longer includes + * that camera3_stream_t in the stream_list argument. The HAL may not change + * values in the stream structure outside of the private pointer, except for + * the usage and max_buffers members during the configure_streams() call + * itself. + * + * If the stream is new, max_buffer, and private pointer fields of the + * stream structure will all be set to 0. The usage will be set to the + * consumer usage flags. The HAL device must set these fields before the + * configure_streams() call returns. These fields are then used by the + * framework and the platform gralloc module to allocate the gralloc + * buffers for each stream. + * + * Before such a new stream can have its buffers included in a capture + * request, the framework will call register_stream_buffers() with that + * stream. However, the framework is not required to register buffers for + * _all_ streams before submitting a request. This allows for quick startup + * of (for example) a preview stream, with allocation for other streams + * happening later or concurrently. + * + * ------------------------------------------------------------------------ + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * Reset the HAL camera device processing pipeline and set up new input and + * output streams. This call replaces any existing stream configuration with + * the streams defined in the stream_list. This method will be called at + * least once after initialize() before a request is submitted with + * process_capture_request(). + * + * The stream_list must contain at least one output-capable stream, and may + * not contain more than one input-capable stream. 
+ * + * The stream_list may contain streams that are also in the currently-active + * set of streams (from the previous call to configure_stream()). These + * streams will already have valid values for usage, max_buffers, and the + * private pointer. + * + * If the HAL needs to change the stream configuration for an existing + * stream due to the new configuration, it may rewrite the values of usage + * and/or max_buffers during the configure call. + * + * The framework will detect such a change, and may then reallocate the + * stream buffers before using buffers from that stream in a request. + * + * If a currently-active stream is not included in stream_list, the HAL may + * safely remove any references to that stream. It will not be reused in a + * later configure() call by the framework, and all the gralloc buffers for + * it will be freed after the configure_streams() call returns. + * + * The stream_list structure is owned by the framework, and may not be + * accessed once this call completes. The address of an individual + * camera3_stream_t structure will remain valid for access by the HAL until + * the end of the first configure_stream() call which no longer includes + * that camera3_stream_t in the stream_list argument. The HAL may not change + * values in the stream structure outside of the private pointer, except for + * the usage and max_buffers members during the configure_streams() call + * itself. + * + * If the stream is new, max_buffer, and private pointer fields of the + * stream structure will all be set to 0. The usage will be set to the + * consumer usage flags. The HAL device must set these fields before the + * configure_streams() call returns. These fields are then used by the + * framework and the platform gralloc module to allocate the gralloc + * buffers for each stream. + * + * Newly allocated buffers may be included in a capture request at any time + * by the framework. 
Once a gralloc buffer is returned to the framework + * with process_capture_result (and its respective release_fence has been + * signaled) the framework may free or reuse it at any time. + * + * ------------------------------------------------------------------------ + * * Preconditions: * * The framework will only call this method when no captures are being @@ -1888,7 +2384,7 @@ typedef struct camera3_device_ops { * frame rate given the sizes and formats of the output streams, as * documented in the camera device's static metadata. * - * Performance expectations: + * Performance requirements: * * This call is expected to be heavyweight and possibly take several hundred * milliseconds to complete, since it may require resetting and @@ -1898,6 +2394,9 @@ typedef struct camera3_device_ops { * application operational mode changes (such as switching from still * capture to video recording). * + * The HAL should return from this call in 500ms, and must return from this + * call in 1000ms. + * * Return values: * * 0: On successful stream configuration @@ -1933,6 +2432,12 @@ typedef struct camera3_device_ops { /** * register_stream_buffers: * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * DEPRECATED. This will not be called and must be set to NULL. + * + * <= CAMERA_DEVICE_API_VERSION_3_1: + * * Register buffers for a given stream with the HAL device. This method is * called by the framework after a new stream is defined by * configure_streams, and before buffers from that stream are included in a @@ -1955,6 +2460,11 @@ typedef struct camera3_device_ops { * the camera HAL should inspect the passed-in buffers here to determine any * platform-private pixel format information. * + * Performance requirements: + * + * This should be a non-blocking call. The HAL should return from this call + * in 1ms, and must return from this call in 5ms. 
+ * * Return values: * * 0: On successful registration of the new stream buffers @@ -1992,6 +2502,11 @@ typedef struct camera3_device_ops { * buffer may be returned for subsequent calls for the same template, or for * other templates. * + * Performance requirements: + * + * This should be a non-blocking call. The HAL should return from this call + * in 1ms, and must return from this call in 5ms. + * * Return values: * * Valid metadata: On successful creation of a default settings @@ -2036,6 +2551,22 @@ typedef struct camera3_device_ops { * framework will wait on the sync fence before refilling and reusing the * input buffer. * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * The input/output buffers provided by the framework in each request + * may be brand new (having never before seen by the HAL). + * + * ------------------------------------------------------------------------ + * Performance considerations: + * + * Handling a new buffer should be extremely lightweight and there should be + * no frame rate degradation or frame jitter introduced. + * + * This call must return fast enough to ensure that the requested frame + * rate can be sustained, especially for streaming cases (post-processing + * quality settings set to FAST). The HAL should return this call in 1 + * frame interval, and must return from this call in 4 frame intervals. + * * Return values: * * 0: On a successful start to processing the capture request @@ -2071,6 +2602,10 @@ typedef struct camera3_device_ops { * The definition of vendor_tag_query_ops_t can be found in * system/media/camera/include/system/camera_metadata.h. * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * DEPRECATED. This function has been deprecated and should be set to + * NULL by the HAL. Please implement get_vendor_tag_ops in camera_common.h + * instead. 
*/ void (*get_metadata_vendor_tag_ops)(const struct camera3_device*, vendor_tag_query_ops_t* ops); @@ -2084,6 +2619,14 @@ typedef struct camera3_device_ops { * * The passed-in file descriptor can be used to write debugging text using * dprintf() or write(). The text should be in ASCII encoding only. + * + * Performance requirements: + * + * This must be a non-blocking call. The HAL should return from this call + * in 1ms, must return from this call in 10ms. This call must avoid + * deadlocks, as it may be called at any point during camera operation. + * Any synchronization primitives used (such as mutex locks or semaphores) + * should be acquired with a timeout. */ void (*dump)(const struct camera3_device *, int fd); @@ -2095,22 +2638,73 @@ typedef struct camera3_device_ops { * quickly as possible in order to prepare for a configure_streams() call. * * No buffers are required to be successfully returned, so every buffer - * held at the time of flush() (whether sucessfully filled or not) may be + * held at the time of flush() (whether successfully filled or not) may be * returned with CAMERA3_BUFFER_STATUS_ERROR. Note the HAL is still allowed - * to return valid (STATUS_OK) buffers during this call, provided they are - * succesfully filled. + * to return valid (CAMERA3_BUFFER_STATUS_OK) buffers during this call, + * provided they are successfully filled. * * All requests currently in the HAL are expected to be returned as soon as * possible. Not-in-process requests should return errors immediately. Any * interruptible hardware blocks should be stopped, and any uninterruptible * blocks should be waited on. * + * More specifically, the HAL must follow below requirements for various cases: + * + * 1. For captures that are too late for the HAL to cancel/stop, and will be + * completed normally by the HAL; i.e. the HAL can send shutter/notify and + * process_capture_result and buffers as normal. + * + * 2. 
For pending requests that have not done any processing, the HAL must call notify + * CAMERA3_MSG_ERROR_REQUEST, and return all the output buffers with + * process_capture_result in the error state (CAMERA3_BUFFER_STATUS_ERROR). + * The HAL must not place the release fence into an error state, instead, + * the release fences must be set to the acquire fences passed by the framework, + * or -1 if they have been waited on by the HAL already. This is also the path + * to follow for any captures for which the HAL already called notify() with + * CAMERA3_MSG_SHUTTER but won't be producing any metadata/valid buffers for. + * After CAMERA3_MSG_ERROR_REQUEST, for a given frame, only process_capture_results with + * buffers in CAMERA3_BUFFER_STATUS_ERROR are allowed. No further notifys or + * process_capture_result with non-null metadata is allowed. + * + * 3. For partially completed pending requests that will not have all the output + * buffers or perhaps missing metadata, the HAL should follow below: + * + * 3.1. Call notify with CAMERA3_MSG_ERROR_RESULT if some of the expected result + * metadata (i.e. one or more partial metadata) won't be available for the capture. + * + * 3.2. Call notify with CAMERA3_MSG_ERROR_BUFFER for every buffer that won't + * be produced for the capture. + * + * 3.3 Call notify with CAMERA3_MSG_SHUTTER with the capture timestamp before + * any buffers/metadata are returned with process_capture_result. + * + * 3.4 For captures that will produce some results, the HAL must not call + * CAMERA3_MSG_ERROR_REQUEST, since that indicates complete failure. + * + * 3.5. Valid buffers/metadata should be passed to the framework as normal. + * + * 3.6. Failed buffers should be returned to the framework as described for case 2. + * But failed buffers do not have to follow the strict ordering valid buffers do, + * and may be out-of-order with respect to valid buffers. 
For example, if buffers + * A, B, C, D, E are sent, D and E are failed, then A, E, B, D, C is an acceptable + * return order. + * + * 3.7. For fully-missing metadata, calling CAMERA3_MSG_ERROR_RESULT is sufficient, no + * need to call process_capture_result with NULL metadata or equivalent. + * * flush() should only return when there are no more outstanding buffers or - * requests left in the HAL. The framework may call configure_streams (as + * requests left in the HAL. The framework may call configure_streams (as * the HAL state is now quiesced) or may issue new requests. * - * A flush() call should only take 100ms or less. The maximum time it can - * take is 1 second. + * Note that it's sufficient to only support fully-succeeded and fully-failed result cases. + * However, it is highly desirable to support the partial failure cases as well, as it + * could help improve the flush call overall performance. + * + * Performance requirements: + * + * The HAL should return from this call in 100ms, and must return from this + * call in 1000ms. And this call must not be blocked longer than pipeline + * latency (see S7 for definition). * * Version information: * @@ -2141,6 +2735,13 @@ typedef struct camera3_device { /** * common.version must equal CAMERA_DEVICE_API_VERSION_3_0 to identify this * device as implementing version 3.0 of the camera device HAL. + * + * Performance requirements: + * + * Camera open (common.module->common.methods->open) should return in 200ms, and must return + * in 500ms. + * Camera close (common.close) should return in 200ms, and must return in 500ms. 
+ * */ hw_device_t common; camera3_device_ops_t *ops; diff --git a/include/hardware/camera_common.h b/include/hardware/camera_common.h index 3a1233f..15b3b19 100644 --- a/include/hardware/camera_common.h +++ b/include/hardware/camera_common.h @@ -24,6 +24,7 @@ #include <sys/types.h> #include <cutils/native_handle.h> #include <system/camera.h> +#include <system/camera_vendor_tags.h> #include <hardware/hardware.h> #include <hardware/gralloc.h> @@ -100,8 +101,9 @@ __BEGIN_DECLS #define CAMERA_DEVICE_API_VERSION_2_1 HARDWARE_DEVICE_API_VERSION(2, 1) #define CAMERA_DEVICE_API_VERSION_3_0 HARDWARE_DEVICE_API_VERSION(3, 0) #define CAMERA_DEVICE_API_VERSION_3_1 HARDWARE_DEVICE_API_VERSION(3, 1) +#define CAMERA_DEVICE_API_VERSION_3_2 HARDWARE_DEVICE_API_VERSION(3, 2) -// Device version 2.x is outdated; device version 3.0 is experimental +// Device version 2.x is outdated; device version 3.x is experimental #define CAMERA_DEVICE_API_VERSION_CURRENT CAMERA_DEVICE_API_VERSION_1_0 /** @@ -251,66 +253,13 @@ typedef struct camera_module_callbacks { } camera_module_callbacks_t; -/** - * Set up vendor-specific tag query methods. These are needed to properly query - * entries with vendor-specified tags, potentially returned by get_camera_info. - * - * This should be used in place of vendor_tag_query_ops, which are deprecated. - */ -typedef struct vendor_tag_ops vendor_tag_ops_t; -struct vendor_tag_ops { - /** - * Get the number of vendor tags supported on this platform. Used to - * calculate the size of buffer needed for holding the array of all tags - * returned by get_all_tags(). - */ - int (*get_tag_count)(const vendor_tag_ops_t *v); - - /** - * Fill an array with all the supported vendor tags on this platform. - * get_tag_count() returns the number of tags supported, and - * tag_array will be allocated with enough space to hold all of the tags. 
- */ - void (*get_all_tags)(const vendor_tag_ops_t *v, uint32_t *tag_array); - - /** - * Get vendor section name for a vendor-specified entry tag. Only called for - * vendor-defined tags. The section name must start with the name of the - * vendor in the Java package style. For example, CameraZoom Inc. must - * prefix their sections with "com.camerazoom." Must return NULL if the tag - * is outside the bounds of vendor-defined sections. - * - * There may be different vendor-defined tag sections, for example the - * phone maker, the chipset maker, and the camera module maker may each - * have their own "com.vendor."-prefixed section. - * - * The memory pointed to by the return value must remain valid for the - * lifetime that the module is loaded, and is owned by the module. - */ - const char *(*get_section_name)(const vendor_tag_ops_t *v, uint32_t tag); - - /** - * Get tag name for a vendor-specified entry tag. Only called for - * vendor-defined tags. Must return NULL if the it is not a vendor-defined - * tag. - * - * The memory pointed to by the return value must remain valid for the - * lifetime that the module is loaded, and is owned by the module. - */ - const char *(*get_tag_name)(const vendor_tag_ops_t *v, uint32_t tag); - +typedef struct camera_module { /** - * Get tag type for a vendor-specified entry tag. Only called for tags >= - * 0x80000000. Must return -1 if the tag is outside the bounds of - * vendor-defined sections. + * Common methods of the camera module. This *must* be the first member of + * camera_module as users of this structure will cast a hw_module_t to + * camera_module pointer in contexts where it's known the hw_module_t references a + * camera_module. 
*/ - int (*get_tag_type)(const vendor_tag_ops_t *v, uint32_t tag); - - /* reserved for future use */ - void* reserved[8]; -}; - -typedef struct camera_module { hw_module_t common; /** @@ -365,6 +314,9 @@ typedef struct camera_module { * HAL should fill in all the vendor tag operation methods, or leave ops * unchanged if no vendor tags are defined. * + * The vendor_tag_ops structure used here is defined in: + * system/media/camera/include/system/vendor_tags.h + * * Version information (based on camera_module_t.common.module_api_version): * * CAMERA_MODULE_API_VERSION_1_x/2_0/2_1: diff --git a/include/hardware/consumerir.h b/include/hardware/consumerir.h index 5adf6be..15334c1 100644 --- a/include/hardware/consumerir.h +++ b/include/hardware/consumerir.h @@ -32,10 +32,22 @@ typedef struct consumerir_freq_range { } consumerir_freq_range_t; typedef struct consumerir_module { + /** + * Common methods of the consumer IR module. This *must* be the first member of + * consumerir_module as users of this structure will cast a hw_module_t to + * consumerir_module pointer in contexts where it's known the hw_module_t references a + * consumerir_module. + */ struct hw_module_t common; } consumerir_module_t; typedef struct consumerir_device { + /** + * Common methods of the consumer IR device. This *must* be the first member of + * consumerir_device as users of this structure will cast a hw_device_t to + * consumerir_device pointer in contexts where it's known the hw_device_t references a + * consumerir_device. + */ struct hw_device_t common; /* diff --git a/include/hardware/fb.h b/include/hardware/fb.h index 135e4aa..9df9416 100644 --- a/include/hardware/fb.h +++ b/include/hardware/fb.h @@ -36,6 +36,12 @@ __BEGIN_DECLS /*****************************************************************************/ typedef struct framebuffer_device_t { + /** + * Common methods of the framebuffer device. 
This *must* be the first member of + * framebuffer_device_t as users of this structure will cast a hw_device_t to + * framebuffer_device_t pointer in contexts where it's known the hw_device_t references a + * framebuffer_device_t. + */ struct hw_device_t common; /* flags describing some attributes of the framebuffer */ diff --git a/include/hardware/fingerprint.h b/include/hardware/fingerprint.h new file mode 100644 index 0000000..7f6fa28 --- /dev/null +++ b/include/hardware/fingerprint.h @@ -0,0 +1,163 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_INCLUDE_HARDWARE_FINGERPRINT_H +#define ANDROID_INCLUDE_HARDWARE_FINGERPRINT_H + +#define FINGERPRINT_MODULE_API_VERSION_1_0 HARDWARE_MODULE_API_VERSION(1, 0) +#define FINGERPRINT_HARDWARE_MODULE_ID "fingerprint" + +typedef enum fingerprint_msg_type { + FINGERPRINT_ERROR = -1, + FINGERPRINT_SCANNED = 1, + FINGERPRINT_TEMPLATE_ENROLLING = 2, + FINGERPRINT_TEMPLATE_REMOVED = 4 +} fingerprint_msg_type_t; + +typedef enum fingerprint_error { + FINGERPRINT_ERROR_HW_UNAVAILABLE = 1, + FINGERPRINT_ERROR_BAD_CAPTURE = 2, + FINGERPRINT_ERROR_TIMEOUT = 3, + FINGERPRINT_ERROR_NO_SPACE = 4 /* No space available to store a template */ +} fingerprint_error_t; + +typedef struct fingerprint_enroll { + uint32_t id; + /* samples_remaining goes from N (no data collected, but N scans needed) + * to 0 (no more data is needed to build a template). 
+ * The progress indication may be augmented by a bitmap encoded indication + * of finger area that needs to be presented by the user. + * Bit numbers mapped to physical location: + * + * distal + * +-+-+-+ + * |2|1|0| + * |5|4|3| + * medial |8|7|6| lateral + * |b|a|9| + * |e|d|c| + * +-+-+-+ + * proximal + * + */ + uint16_t data_collected_bmp; + uint16_t samples_remaining; +} fingerprint_enroll_t; + +typedef struct fingerprint_removed { + uint32_t id; +} fingerprint_removed_t; + +typedef struct fingerprint_scanned { + uint32_t id; /* 0 is a special id and means no match */ +} fingerprint_scanned_t; + +typedef struct fingerprint_msg { + fingerprint_msg_type_t type; + union { + uint64_t raw; + fingerprint_error_t error; + fingerprint_enroll_t enroll; + fingerprint_removed_t removed; + fingerprint_scanned_t scan; + } data; +} fingerprint_msg_t; + +/* Callback function type */ +typedef void (*fingerprint_notify_t)(fingerprint_msg_t msg); + +/* Synchronous operation */ +typedef struct fingerprint_device { + /** + * Common methods of the fingerprint device. This *must* be the first member of + * fingerprint_device as users of this structure will cast a hw_device_t to + * fingerprint_device pointer in contexts where it's known the hw_device_t references a + * fingerprint_device. + */ + struct hw_device_t common; + + /* + * Fingerprint enroll request: + * Switches the HAL state machine to collect and store a new fingerprint + * template. Switches back as soon as enroll is complete + * (fingerprint_msg.type == FINGERPRINT_TEMPLATE_ENROLLING && + * fingerprint_msg.data.enroll.samples_remaining == 0) + * or after timeout_sec seconds. + * + * Function return: 0 if enrollment process can be successfully started + * -1 otherwise. A notify() function may be called + * indicating the error condition. 
+ */ + int (*enroll)(struct fingerprint_device *dev, uint32_t timeout_sec); + + /* + * Cancel fingerprint enroll request: + * Switches the HAL state machine back to accept a fingerprint scan mode. + * (fingerprint_msg.type == FINGERPRINT_TEMPLATE_ENROLLING && + * fingerprint_msg.data.enroll.samples_remaining == 0) + * will indicate switch back to the scan mode. + * + * Function return: 0 if cancel request is accepted + * -1 otherwise. + */ + int (*enroll_cancel)(struct fingerprint_device *dev); + + /* + * Fingerprint remove request: + * deletes a fingerprint template. + * If the fingerprint id is 0 the entire template database will be removed. + * notify() will be called for each template deleted with + * fingerprint_msg.type == FINGERPRINT_TEMPLATE_REMOVED and + * fingerprint_msg.data.removed.id indicating each template id removed. + * + * Function return: 0 if fingerprint template(s) can be successfully deleted + * -1 otherwise. + */ + int (*remove)(struct fingerprint_device *dev, uint32_t fingerprint_id); + + /* + * Set notification callback: + * Registers a user function that would receive notifications from the HAL + * The call will block if the HAL state machine is in busy state until HAL + * leaves the busy state. + * + * Function return: 0 if callback function is successfully registered + * -1 otherwise. + */ + int (*set_notify)(struct fingerprint_device *dev, + fingerprint_notify_t notify); + + /* + * Client provided callback function to receive notifications. + * Do not set by hand, use the function above instead. + */ + fingerprint_notify_t notify; + + /* Reserved for future use. Must be NULL. */ + void* reserved[8 - 4]; +} fingerprint_device_t; + +typedef struct fingerprint_module { + /** + * Common methods of the fingerprint module. This *must* be the first member of + * fingerprint_module as users of this structure will cast a hw_module_t to + * fingerprint_module pointer in contexts where it's known the hw_module_t references a + * fingerprint_module. 
+ */ + struct hw_module_t common; +} fingerprint_module_t; + +#endif /* ANDROID_INCLUDE_HARDWARE_FINGERPRINT_H */ diff --git a/include/hardware/gps.h b/include/hardware/gps.h index 458b5b4..4167793 100644 --- a/include/hardware/gps.h +++ b/include/hardware/gps.h @@ -221,6 +221,11 @@ typedef uint16_t AGpsStatusValue; #define AGPS_INTERFACE "agps" /** + * Name of the Supl Certificate interface. + */ +#define SUPL_CERTIFICATE_INTERFACE "supl-certificate" + +/** * Name for NI interface */ #define GPS_NI_INTERFACE "gps-ni" @@ -507,7 +512,7 @@ typedef struct { */ void (*init)( AGpsCallbacks* callbacks ); /** - * Notifies that a data connection is available and sets + * Notifies that a data connection is available and sets * the name of the APN to be used for SUPL. */ int (*data_conn_open)( const char* apn ); @@ -516,7 +521,7 @@ typedef struct { */ int (*data_conn_closed)(); /** - * Notifies that a data connection is not available for AGPS. + * Notifies that a data connection is not available for AGPS. */ int (*data_conn_failed)(); /** @@ -525,6 +530,72 @@ typedef struct { int (*set_server)( AGpsType type, const char* hostname, int port ); } AGpsInterface; +/** Error codes associated with certificate operations */ +#define AGPS_CERTIFICATE_OPERATION_SUCCESS 0 +#define AGPS_CERTIFICATE_ERROR_GENERIC -100 +#define AGPS_CERTIFICATE_ERROR_TOO_MANY_CERTIFICATES -101 + +/** A data structure that represents an X.509 certificate using DER encoding */ +typedef struct { + size_t length; + u_char* data; +} DerEncodedCertificate; + +/** + * A type definition for SHA1 Fingerprints used to identify X.509 Certificates + * The Fingerprint is a digest of the DER Certificate that uniquely identifies it. 
+ */ +typedef struct { + u_char data[20]; +} Sha1CertificateFingerprint; + +/** AGPS Interface to handle SUPL certificate operations */ +typedef struct { + /** set to sizeof(SuplCertificateInterface) */ + size_t size; + + /** + * Installs a set of Certificates used for SUPL connections to the AGPS server. + * If needed the HAL should find out internally any certificates that need to be removed to + * accommodate the certificates to install. + * The certificates installed represent a full set of valid certificates needed to connect to + * AGPS SUPL servers. + * The list of certificates is required, and all must be available at the same time, when trying + * to establish a connection with the AGPS Server. + * + * Parameters: + * certificates - A pointer to an array of DER encoded certificates that need to be + * installed in the HAL. + * length - The number of certificates to install. + * Returns: + * AGPS_CERTIFICATE_OPERATION_SUCCESS if the operation is completed successfully + * AGPS_CERTIFICATE_ERROR_TOO_MANY_CERTIFICATES if the HAL cannot store the number of + * certificates attempted to be installed, the state of the certificates stored should + * remain the same as before on this error case. + * + * IMPORTANT: + * If needed the HAL should find out internally the set of certificates that need to be + * removed to accommodate the certificates to install. + */ + int (*install_certificates) ( const DerEncodedCertificate* certificates, size_t length ); + + /** + * Notifies the HAL that a list of certificates used for SUPL connections are revoked. It is + * expected that the given set of certificates is removed from the internal store of the HAL. + * + * Parameters: + * fingerprints - A pointer to an array of SHA1 Fingerprints to identify the set of + * certificates to revoke. + * length - The number of fingerprints provided. + * Returns: + * AGPS_CERTIFICATE_OPERATION_SUCCESS if the operation is completed successfully. 
+ * + * IMPORTANT: + * If any of the certificates provided (through its fingerprint) is not known by the HAL, + * it should be ignored and continue revoking/deleting the rest of them. + */ + int (*revoke_certificates) ( const Sha1CertificateFingerprint* fingerprints, size_t length ); +} SuplCertificateInterface; /** Represents an NI request */ typedef struct { diff --git a/include/hardware/hardware.h b/include/hardware/hardware.h index 416ae39..74f57aa 100644 --- a/include/hardware/hardware.h +++ b/include/hardware/hardware.h @@ -144,8 +144,12 @@ typedef struct hw_module_t { /** module's dso */ void* dso; +#ifdef __LP64__ + uint64_t reserved[32-7]; +#else /** padding to 128 bytes, reserved for future use */ uint32_t reserved[32-7]; +#endif } hw_module_t; @@ -186,7 +190,11 @@ typedef struct hw_device_t { struct hw_module_t* module; /** padding reserved for future use */ +#ifdef __LP64__ + uint64_t reserved[12]; +#else uint32_t reserved[12]; +#endif /** Close this device */ int (*close)(struct hw_device_t* device); diff --git a/include/hardware/hdmi_cec.h b/include/hardware/hdmi_cec.h index f049952..46294ae 100644 --- a/include/hardware/hdmi_cec.h +++ b/include/hardware/hdmi_cec.h @@ -91,7 +91,7 @@ enum cec_message_type { CEC_MESSAGE_TIMER_CLEARED_STATUS = 0x043, CEC_MESSAGE_USER_CONTROL_PRESSED = 0x44, CEC_MESSAGE_USER_CONTROL_RELEASED = 0x45, - CEC_MESSAGE_GET_OSD_NAME = 0x46, + CEC_MESSAGE_GIVE_OSD_NAME = 0x46, CEC_MESSAGE_SET_OSD_NAME = 0x47, CEC_MESSAGE_SET_OSD_STRING = 0x64, CEC_MESSAGE_SET_TIMER_PROGRAM_TITLE = 0x67, @@ -129,6 +129,12 @@ enum cec_message_type { CEC_MESSAGE_VENDOR_COMMAND_WITH_ID = 0xA0, CEC_MESSAGE_CLEAR_EXTERNAL_TIMER = 0xA1, CEC_MESSAGE_SET_EXTERNAL_TIMER = 0xA2, + CEC_MESSAGE_INITIATE_ARC = 0xC0, + CEC_MESSAGE_REPORT_ARC_INITIATED = 0xC1, + CEC_MESSAGE_REPORT_ARC_TERMINATED = 0xC2, + CEC_MESSAGE_REQUEST_ARC_INITIATION = 0xC3, + CEC_MESSAGE_REQUEST_ARC_TERMINATION = 0xC4, + CEC_MESSAGE_TERMINATE_ARC = 0xC5, CEC_MESSAGE_ABORT = 0xFF }; @@ 
-149,7 +155,8 @@ enum abort_reason { */ enum { HDMI_EVENT_CEC_MESSAGE = 1, - HDMI_EVENT_HOT_PLUG = 2 + HDMI_EVENT_HOT_PLUG = 2, + HDMI_EVENT_TX_STATUS = 3, }; /* @@ -162,25 +169,74 @@ enum { }; /* + * TX result type. Used when the event type is HDMI_EVENT_TX_STATUS. + */ +enum { + HDMI_TX_STATUS_SUCCESS = 0, + HDMI_TX_STATUS_TIMEDOUT = 1, /* failed on wait */ + HDMI_TX_STATUS_NOCONN = 2 /* connection problem */ +}; + +/* + * error code used for send_message. + */ +enum { + HDMI_RESULT_SUCCESS = 0, + HDMI_RESULT_NACK = 1, /* not acknowledged */ + HDMI_RESULT_BUSY = 2 /* bus is busy */ +}; + +/* + * HDMI port type. + */ +typedef enum hdmi_port_type { + HDMI_INPUT = 0, + HDMI_OUTPUT = 1 +} hdmi_port_type_t; + +/* + * Flags used for set_option() + */ +enum { + /* When set to false, HAL does not wake up the system upon receiving + * <Image View On> or <Text View On>. Used when user changes the TV + * settings to disable the auto TV on functionality. + * True by default. + */ + HDMI_OPTION_WAKEUP = 1, + + /* When set to false, all the CEC commands are discarded. Used when + * user changes the TV settings to disable CEC functionality. + * True by default. + */ + HDMI_OPTION_ENABLE_CEC = 2, + + /* Setting this flag to false means Android system will stop handling + * CEC service and yield the control over to the microprocessor that is + * powered on through the standby mode. When set to true, the system + * will gain the control over, hence telling the microprocessor to stop + * handling the cec commands. This is called when system goes + * in and out of standby mode to notify the microprocessor that it should + * start/stop handling CEC commands on behalf of the system. + * False by default. 
+ */ + HDMI_OPTION_SYSTEM_CEC_CONTROL = 3, +}; + +/* * Maximum length in bytes of cec message body (exclude header block), * should not exceed 16 (spec CEC 6 Frame Description) */ #define CEC_MESSAGE_BODY_MAX_LENGTH 16 typedef struct cec_message { - /* - * logical address of sender - */ + /* logical address of sender */ cec_logical_address_t initiator; - /* - * logical address of receiver - */ + /* logical address of receiver */ cec_logical_address_t destination; - /* - * length in bytes of body, range [0, CEC_MESSAGE_BODY_MAX_LENGTH] - */ + /* Length in bytes of body, range [0, CEC_MESSAGE_BODY_MAX_LENGTH] */ size_t length; unsigned char body[CEC_MESSAGE_BODY_MAX_LENGTH]; } cec_message_t; @@ -190,8 +246,14 @@ typedef struct hotplug_event { * true if the cable is connected; otherwise false. */ int connected; + int port; } hotplug_event_t; +typedef struct tx_status_event { + int status; + int opcode; /* CEC opcode */ +} tx_status_event_t; + /* * HDMI event generated from HAL. */ @@ -201,16 +263,33 @@ typedef struct hdmi_event { union { cec_message_t cec; hotplug_event_t hotplug; + tx_status_event_t tx_status; }; } hdmi_event_t; /* + * HDMI port descriptor + */ +typedef struct hdmi_port_info { + hdmi_port_type_t type; + int port_num; + int cec_supported; + int arc_supported; + uint16_t physical_address; +} hdmi_port_info_t; + +/* * Callback function type that will be called by HAL implementation. * Services can not close/open the device in the callback. */ typedef void (*event_callback_t)(const hdmi_event_t* event, void* arg); typedef struct hdmi_cec_module { + /** + * Common methods of the HDMI CEC module. This *must* be the first member of + * hdmi_cec_module as users of this structure will cast a hw_module_t to hdmi_cec_module + * pointer in contexts where it's known the hw_module_t references a hdmi_cec_module. + */ struct hw_module_t common; } hdmi_module_t; @@ -218,15 +297,22 @@ typedef struct hdmi_cec_module { * HDMI-CEC HAL interface definition. 
*/ typedef struct hdmi_cec_device { + /** + * Common methods of the HDMI CEC device. This *must* be the first member of + * hdmi_cec_device as users of this structure will cast a hw_device_t to hdmi_cec_device + * pointer in contexts where it's known the hw_device_t references a hdmi_cec_device. + */ struct hw_device_t common; /* - * (*add_logical_address)() passes the logical address that will be used in this system. + * (*add_logical_address)() passes the logical address that will be used + * in this system. * * HAL may use it to configure the hardware so that the CEC commands addressed - * the given logical address can be filtered in. This method can be called as many times - * as necessary in order to support multiple logical devices. addr should be in the range - * of valid logical addresses for the call to succeed. + * the given logical address can be filtered in. This method can be called + * as many times as necessary in order to support multiple logical devices. + * addr should be in the range of valid logical addresses for the call + * to succeed. * * Returns 0 on success or -errno on error. */ @@ -235,8 +321,9 @@ typedef struct hdmi_cec_device { /* * (*clear_logical_address)() tells HAL to reset all the logical addresses. * - * It is used when the system doesn't need to process CEC command any more, hence to tell - * HAL to stop receiving commands from the CEC bus, and change the state back to the beginning. + * It is used when the system doesn't need to process CEC command any more, + * hence to tell HAL to stop receiving commands from the CEC bus, and change + * the state back to the beginning. */ void (*clear_logical_address)(const struct hdmi_cec_device* dev); @@ -254,12 +341,18 @@ typedef struct hdmi_cec_device { int (*get_physical_address)(const struct hdmi_cec_device* dev, uint16_t* addr); /* - * (*send_message)() transmits HDMI-CEC message to other HDMI device. 
The method should be - * designed to return in a certain amount of time not hanging forever, which can happen - * if CEC signal line is pulled low for some reason. HAL implementation should take - * the situation into account so as not to wait forever for the message to get sent out. + * (*send_message)() transmits HDMI-CEC message to other HDMI device. * - * Returns 0 on success or -errno on error. + * The method should be designed to return in a certain amount of time not + * hanging forever, which can happen if CEC signal line is pulled low for + * some reason. HAL implementation should take the situation into account + * so as not to wait forever for the message to get sent out. + * + * It should try retransmission at least once as specified in the standard, + * and later should report the transmission result via tx_status_event_t. + * + * Returns error code. See HDMI_RESULT_SUCCESS, HDMI_RESULT_NACK, and + * HDMI_RESULT_BUSY. */ int (*send_message)(const struct hdmi_cec_device* dev, const cec_message_t*); @@ -285,8 +378,39 @@ typedef struct hdmi_cec_device { */ void (*get_vendor_id)(const struct hdmi_cec_device* dev, uint32_t* vendor_id); + /* + * (*get_port_info)() returns the hdmi port information of underlying hardware. + * info is the list of HDMI port information, and 'total' is the number of + * HDMI ports in the system. + */ + void (*get_port_info)(const struct hdmi_cec_device* dev, + struct hdmi_port_info* list[], int* total); + + /* + * (*set_option)() passes flags controlling the way HDMI-CEC service works down + * to HAL implementation. Those flags will be used in case the feature needs + * update in HAL itself, firmware or microcontroller. + */ + void (*set_option)(const struct hdmi_cec_device* dev, int flag, int value); + + /* + * (*set_audio_return_channel)() configures ARC circuit in the hardware logic + * to start or stop the feature. Flag can be either 1 to start the feature + * or 0 to stop it. + * + * Returns 0 on success or -errno on error. 
+ */ + void (*set_audio_return_channel)(const struct hdmi_cec_device* dev, int flag); + + /* + * (*is_connected)() returns the connection status of the specified port. + * Returns HDMI_CONNECTED if a device is connected, otherwise HDMI_NOT_CONNECTED. + * The HAL should watch for +5V power signal to determine the status. + */ + int (*is_connected)(const struct hdmi_cec_device* dev, int port); + /* Reserved for future use to maximum 16 functions. Must be NULL. */ - void* reserved[16 - 7]; + void* reserved[16 - 11]; } hdmi_cec_device_t; /** convenience API for opening and closing a device */ diff --git a/include/hardware/hwcomposer.h b/include/hardware/hwcomposer.h index 86479d3..049edea 100644 --- a/include/hardware/hwcomposer.h +++ b/include/hardware/hwcomposer.h @@ -121,6 +121,26 @@ typedef struct hwc_layer_1 { * that the layer will be handled by the HWC (ie: it must not be * composited with OpenGL ES). * + * + * HWC_SIDEBAND + * Set by the caller before calling (*prepare)(), this value indicates + * the contents of this layer come from a sideband video stream. + * + * The h/w composer is responsible for receiving new image buffers from + * the stream at the appropriate time (e.g. synchronized to a separate + * audio stream), compositing them with the current contents of other + * layers, and displaying the resulting image. This happens + * independently of the normal prepare/set cycle. The prepare/set calls + * only happen when other layers change, or when properties of the + * sideband layer such as position or size change. + * + * If the h/w composer can't handle the layer as a sideband stream for + * some reason (e.g. unsupported scaling/blending/rotation, or too many + * sideband layers) it can set compositionType to HWC_FRAMEBUFFER in + * (*prepare)(). However, doing so will result in the layer being shown + * as a solid color since the platform is not currently able to composite + * sideband layers with the GPU. 
This may be improved in future + * versions of the platform. */ int32_t compositionType; @@ -141,13 +161,21 @@ typedef struct hwc_layer_1 { hwc_color_t backgroundColor; struct { - /* handle of buffer to compose. This handle is guaranteed to have been - * allocated from gralloc using the GRALLOC_USAGE_HW_COMPOSER usage flag. If - * the layer's handle is unchanged across two consecutive prepare calls and - * the HWC_GEOMETRY_CHANGED flag is not set for the second call then the - * HWComposer implementation may assume that the contents of the buffer have - * not changed. */ - buffer_handle_t handle; + union { + /* When compositionType is HWC_FRAMEBUFFER, HWC_OVERLAY, + * HWC_FRAMEBUFFER_TARGET, this is the handle of the buffer to + * compose. This handle is guaranteed to have been allocated + * from gralloc using the GRALLOC_USAGE_HW_COMPOSER usage flag. + * If the layer's handle is unchanged across two consecutive + * prepare calls and the HWC_GEOMETRY_CHANGED flag is not set + * for the second call then the HWComposer implementation may + * assume that the contents of the buffer have not changed. */ + buffer_handle_t handle; + + /* When compositionType is HWC_SIDEBAND, this is the handle + * of the sideband video stream to compose. */ + const native_handle_t* sidebandStream; + }; /* transformation to apply to the buffer during composition */ uint32_t transform; @@ -191,6 +219,10 @@ typedef struct hwc_layer_1 { * reads from them are complete before the framebuffer is ready for * display. * + * HWC_SIDEBAND layers will never have an acquire fence, since + * synchronization is handled through implementation-defined + * sideband mechanisms. + * * The HWC takes ownership of the acquireFenceFd and is responsible * for closing it when no longer needed. */ @@ -214,6 +246,10 @@ typedef struct hwc_layer_1 { * produce a release fence for them. The releaseFenceFd will be -1 * for these layers when set() is called. 
* + * Since HWC_SIDEBAND buffers don't pass through the HWC client, + * the HWC shouldn't produce a release fence for them. The + * releaseFenceFd will be -1 for these layers when set() is called. + * * The HWC client taks ownership of the releaseFenceFd and is * responsible for closing it when no longer needed. */ @@ -435,10 +471,22 @@ typedef struct hwc_procs { /*****************************************************************************/ typedef struct hwc_module { + /** + * Common methods of the hardware composer module. This *must* be the first member of + * hwc_module as users of this structure will cast a hw_module_t to + * hwc_module pointer in contexts where it's known the hw_module_t references a + * hwc_module. + */ struct hw_module_t common; } hwc_module_t; typedef struct hwc_composer_device_1 { + /** + * Common methods of the hardware composer device. This *must* be the first member of + * hwc_composer_device_1 as users of this structure will cast a hw_device_t to + * hwc_composer_device_1 pointer in contexts where it's known the hw_device_t references a + * hwc_composer_device_1. + */ struct hw_device_t common; /* diff --git a/include/hardware/hwcomposer_defs.h b/include/hardware/hwcomposer_defs.h index c69a4bc..242e3f6 100644 --- a/include/hardware/hwcomposer_defs.h +++ b/include/hardware/hwcomposer_defs.h @@ -36,6 +36,7 @@ __BEGIN_DECLS #define HWC_DEVICE_API_VERSION_1_1 HARDWARE_DEVICE_API_VERSION_2(1, 1, HWC_HEADER_VERSION) #define HWC_DEVICE_API_VERSION_1_2 HARDWARE_DEVICE_API_VERSION_2(1, 2, HWC_HEADER_VERSION) #define HWC_DEVICE_API_VERSION_1_3 HARDWARE_DEVICE_API_VERSION_2(1, 3, HWC_HEADER_VERSION) +#define HWC_DEVICE_API_VERSION_1_4 HARDWARE_DEVICE_API_VERSION_2(1, 4, HWC_HEADER_VERSION) enum { /* hwc_composer_device_t::set failed in EGL */ @@ -95,6 +96,10 @@ enum { /* this layer holds the result of compositing the HWC_FRAMEBUFFER layers. * Added in HWC_DEVICE_API_VERSION_1_1. 
*/ HWC_FRAMEBUFFER_TARGET = 3, + + /* this layer's contents are taken from a sideband buffer stream. + * Added in HWC_DEVICE_API_VERSION_1_4. */ + HWC_SIDEBAND = 4, }; /* diff --git a/include/hardware/keymaster.h b/include/hardware/keymaster.h index 12158bf..8c5ff14 100644 --- a/include/hardware/keymaster.h +++ b/include/hardware/keymaster.h @@ -83,6 +83,12 @@ enum { }; struct keystore_module { + /** + * Common methods of the keystore module. This *must* be the first member of + * keystore_module as users of this structure will cast a hw_module_t to + * keystore_module pointer in contexts where it's known the hw_module_t references a + * keystore_module. + */ hw_module_t common; }; @@ -166,6 +172,12 @@ typedef struct { * The parameters that can be set for a given keymaster implementation. */ struct keymaster_device { + /** + * Common methods of the keymaster device. This *must* be the first member of + * keymaster_device as users of this structure will cast a hw_device_t to + * keymaster_device pointer in contexts where it's known the hw_device_t references a + * keymaster_device. + */ struct hw_device_t common; /** @@ -282,4 +294,3 @@ static inline int keymaster_close(keymaster_device_t* device) __END_DECLS #endif // ANDROID_HARDWARE_KEYMASTER_H - diff --git a/include/hardware/local_time_hal.h b/include/hardware/local_time_hal.h index 6b6a317..946e799 100644 --- a/include/hardware/local_time_hal.h +++ b/include/hardware/local_time_hal.h @@ -55,6 +55,12 @@ struct local_time_module { }; struct local_time_hw_device { + /** + * Common methods of the local time hardware device. This *must* be the first member of + * local_time_hw_device as users of this structure will cast a hw_device_t to + * local_time_hw_device pointer in contexts where it's known the hw_device_t references a + * local_time_hw_device. 
+ */ struct hw_device_t common; /** diff --git a/include/hardware/nfc.h b/include/hardware/nfc.h index 09523b3..0a8ed72 100644 --- a/include/hardware/nfc.h +++ b/include/hardware/nfc.h @@ -59,6 +59,12 @@ __BEGIN_DECLS * nfc_nci_module_t should contain module-specific parameters */ typedef struct nfc_nci_module_t { + /** + * Common methods of the NFC NCI module. This *must* be the first member of + * nfc_nci_module_t as users of this structure will cast a hw_module_t to + * nfc_nci_module_t pointer in contexts where it's known the hw_module_t references a + * nfc_nci_module_t. + */ struct hw_module_t common; } nfc_nci_module_t; @@ -108,6 +114,12 @@ typedef void (nfc_stack_data_callback_t) (uint16_t data_len, uint8_t* p_data); * All methods in the NCI HAL are asynchronous. */ typedef struct nfc_nci_device { + /** + * Common methods of the NFC NCI device. This *must* be the first member of + * nfc_nci_device_t as users of this structure will cast a hw_device_t to + * nfc_nci_device_t pointer in contexts where it's known the hw_device_t references a + * nfc_nci_device_t. + */ struct hw_device_t common; /* * (*open)() Opens the NFC controller device and performs initialization. @@ -210,6 +222,12 @@ static inline int nfc_nci_close(nfc_nci_device_t* dev) { #define NFC_PN544_CONTROLLER "pn544" typedef struct nfc_module_t { + /** + * Common methods of the NFC NXP PN544 module. This *must* be the first member of + * nfc_module_t as users of this structure will cast a hw_module_t to + * nfc_module_t pointer in contexts where it's known the hw_module_t references a + * nfc_module_t. + */ struct hw_module_t common; } nfc_module_t; @@ -227,6 +245,12 @@ typedef enum { } nfc_pn544_linktype; typedef struct { + /** + * Common methods of the NFC NXP PN544 device. This *must* be the first member of + * nfc_pn544_device_t as users of this structure will cast a hw_device_t to + * nfc_pn544_device_t pointer in contexts where it's known the hw_device_t references a + * nfc_pn544_device_t. 
+ */ struct hw_device_t common; /* The number of EEPROM registers to write */ diff --git a/include/hardware/nfc_tag.h b/include/hardware/nfc_tag.h new file mode 100644 index 0000000..040a07d --- /dev/null +++ b/include/hardware/nfc_tag.h @@ -0,0 +1,95 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_NFC_TAG_HAL_INTERFACE_H +#define ANDROID_NFC_TAG_HAL_INTERFACE_H + +#include <stdint.h> + +#include <hardware/hardware.h> + +__BEGIN_DECLS + +/* + * HAL for programmable NFC tags. + * + */ + +#define NFC_TAG_HARDWARE_MODULE_ID "nfc_tag" +#define NFC_TAG_ID "tag" + +typedef struct nfc_tag_module_t { + /** + * Common methods of the NFC tag module. This *must* be the first member of + * nfc_tag_module_t as users of this structure will cast a hw_module_t to + * nfc_tag_module_t pointer in contexts where it's known the hw_module_t references a + * nfc_tag_module_t. + */ + struct hw_module_t common; +} nfc_tag_module_t; + +typedef struct nfc_tag_device { + /** + * Common methods of the NFC tag device. This *must* be the first member of + * nfc_tag_device_t as users of this structure will cast a hw_device_t to + * nfc_tag_device_t pointer in contexts where it's known the hw_device_t references a + * nfc_tag_device_t. + */ + struct hw_device_t common; + + /** + * Initialize the NFC tag. 
 + * + * The driver must: + * * Set the static lock bytes to read only + * * Configure the Capability Container to disable write access + * eg: 0xE1 0x10 <size> 0x0F + * + * This function is called once before any calls to setContent(). + * + * Returns 0 on success or -errno on error. + */ + int (*init)(const struct nfc_tag_device *dev); + + /** + * Set the NFC tag content. + * + * The driver must write <data> in the data area of the tag starting at + * byte 0 of block 4 and zero the rest of the data area. + * + * Returns 0 on success or -errno on error. + */ + int (*setContent)(const struct nfc_tag_device *dev, const uint8_t *data, size_t len); + + /** + * Returns the memory size of the data area. + */ + int (*getMemorySize)(const struct nfc_tag_device *dev); +} nfc_tag_device_t; + +static inline int nfc_tag_open(const struct hw_module_t* module, + nfc_tag_device_t** dev) { + return module->methods->open(module, NFC_TAG_ID, + (struct hw_device_t**)dev); +} + +static inline int nfc_tag_close(nfc_tag_device_t* dev) { + return dev->common.close(&dev->common); +} + +__END_DECLS + +#endif // ANDROID_NFC_TAG_HAL_INTERFACE_H diff --git a/include/hardware/power.h b/include/hardware/power.h index 89d57ed..dc33705 100644 --- a/include/hardware/power.h +++ b/include/hardware/power.h @@ -44,7 +44,8 @@ typedef enum { * KLP. */ POWER_HINT_VIDEO_ENCODE = 0x00000003, - POWER_HINT_VIDEO_DECODE = 0x00000004 + POWER_HINT_VIDEO_DECODE = 0x00000004, + POWER_HINT_LOW_POWER = 0x00000005 } power_hint_t; /** @@ -112,6 +113,13 @@ typedef struct power_module { * and it may be appropriate to raise speeds of CPU, memory bus, * etc. The data parameter is unused. + * + * POWER_HINT_LOW_POWER + * + * Low power mode is activated or deactivated. Low power mode + * is intended to save battery at the cost of performance. The data + * parameter is non-zero when low power mode is activated, and zero + * when deactivated. + * * A particular platform may choose to ignore any hint. 
* * availability: version 0.2 diff --git a/include/hardware/sensors.h b/include/hardware/sensors.h index 418c348..6a3bbae 100644 --- a/include/hardware/sensors.h +++ b/include/hardware/sensors.h @@ -34,6 +34,13 @@ __BEGIN_DECLS #define SENSORS_DEVICE_API_VERSION_1_0 HARDWARE_DEVICE_API_VERSION_2(1, 0, SENSORS_HEADER_VERSION) #define SENSORS_DEVICE_API_VERSION_1_1 HARDWARE_DEVICE_API_VERSION_2(1, 1, SENSORS_HEADER_VERSION) #define SENSORS_DEVICE_API_VERSION_1_2 HARDWARE_DEVICE_API_VERSION_2(1, 2, SENSORS_HEADER_VERSION) +#define SENSORS_DEVICE_API_VERSION_1_3 HARDWARE_DEVICE_API_VERSION_2(1, 3, SENSORS_HEADER_VERSION) + +/** + * Please see the Sensors section of source.android.com for an + * introduction to and detailed descriptions of Android sensor types: + * http://source.android.com/devices/sensors/index.html + */ /** * The id of this module @@ -57,9 +64,12 @@ __BEGIN_DECLS /* + * **** Deprecated ***** * flags for (*batch)() * Availability: SENSORS_DEVICE_API_VERSION_1_0 - * see (*batch)() documentation for details + * see (*batch)() documentation for details. + * Deprecated as of SENSORS_DEVICE_API_VERSION_1_3. + * WAKE_UP_* sensors replace WAKE_UPON_FIFO_FULL concept. */ enum { SENSORS_BATCH_DRY_RUN = 0x00000001, @@ -82,74 +92,25 @@ enum { */ #define SENSOR_PERMISSION_BODY_SENSORS "android.permission.BODY_SENSORS" -/** - * Definition of the axis used by the sensor HAL API - * - * This API is relative to the screen of the device in its default orientation, - * that is, if the device can be used in portrait or landscape, this API - * is only relative to the NATURAL orientation of the screen. In other words, - * the axis are not swapped when the device's screen orientation changes. - * Higher level services /may/ perform this transformation. 
- * - * x<0 x>0 - * ^ - * | - * +-----------+--> y>0 - * | | - * | | - * | | - * | | / z<0 - * | | / - * | | / - * O-----------+/ - * |[] [ ] []/ - * +----------/+ y<0 - * / - * / - * |/ z>0 (toward the sky) - * - * O: Origin (x=0,y=0,z=0) - * - */ - /* - * Interaction with suspend mode - * - * Unless otherwise noted, an enabled sensor shall not prevent the - * SoC to go into suspend mode. It is the responsibility of applications - * to keep a partial wake-lock should they wish to receive sensor - * events while the screen is off. While in suspend mode, and unless - * otherwise noted (batch mode, sensor particularities, ...), enabled sensors' - * events are lost. - * - * Note that conceptually, the sensor itself is not de-activated while in - * suspend mode -- it's just that the data it returns are lost. As soon as - * the SoC gets out of suspend mode, operations resume as usual. Of course, - * in practice sensors shall be disabled while in suspend mode to - * save power, unless batch mode is active, in which case they must - * continue fill their internal FIFO (see the documentation of batch() to - * learn how suspend interacts with batch mode). - * - * In batch mode, and only when the flag SENSORS_BATCH_WAKE_UPON_FIFO_FULL is - * set and supported, the specified sensor must be able to wake-up the SoC and - * be able to buffer at least 10 seconds worth of the requested sensor events. - * - * There are notable exceptions to this behavior, which are sensor-dependent - * (see sensor types definitions below) - * - * - * The sensor type documentation below specifies the wake-up behavior of - * each sensor: - * wake-up: yes this sensor must wake-up the SoC to deliver events - * wake-up: no this sensor shall not wake-up the SoC, events are dropped - * + * Availability: SENSORS_DEVICE_API_VERSION_1_3 + * Sensor flags used in sensor_t.flags. */ +enum { + /* + * Whether this sensor wakes up the AP from suspend mode when data is available. 
+ */ + SENSOR_FLAG_WAKE_UP = 1U << 0 +}; /* * Sensor type * * Each sensor has a type which defines what this sensor measures and how - * measures are reported. All types are defined below. + * measures are reported. See the Base sensors and Composite sensors lists + * for complete descriptions: + * http://source.android.com/devices/sensors/base_triggers.html + * http://source.android.com/devices/sensors/composite_sensors.html * * Device manufacturers (OEMs) can define their own sensor types, for * their private use by applications or services provided by them. Such @@ -196,47 +157,6 @@ enum { #define SENSOR_TYPE_DEVICE_PRIVATE_BASE 0x10000 /* - * Sensor fusion and virtual sensors - * - * Many sensor types are or can be implemented as virtual sensors from - * physical sensors on the device. For instance the rotation vector sensor, - * orientation sensor, step-detector, step-counter, etc... - * - * From the point of view of this API these virtual sensors MUST appear as - * real, individual sensors. It is the responsibility of the driver and HAL - * to make sure this is the case. - * - * In particular, all sensors must be able to function concurrently. - * For example, if defining both an accelerometer and a step counter, - * then both must be able to work concurrently. - */ - -/* - * Trigger modes - * - * Sensors can report events in different ways called trigger modes, - * each sensor type has one and only one trigger mode associated to it. - * Currently there are four trigger modes defined: - * - * continuous: events are reported at a constant rate defined by setDelay(). - * eg: accelerometers, gyroscopes. - * on-change: events are reported only if the sensor's value has changed. - * setDelay() is used to set a lower limit to the reporting - * period (minimum time between two events). - * The HAL must return an event immediately when an on-change - * sensor is activated. 
- * eg: proximity, light sensors - * one-shot: upon detection of an event, the sensor deactivates itself and - * then sends a single event. Order matters to avoid race - * conditions. No other event is sent until the sensor get - * reactivated. setDelay() is ignored. - * eg: significant motion sensor - * special: see details in the sensor type specification below - * - */ - - -/* * SENSOR_TYPE_META_DATA * trigger-mode: n/a * wake-up sensor: n/a @@ -277,30 +197,6 @@ enum { * All values are in SI units (m/s^2) and measure the acceleration of the * device minus the force of gravity. * - * Acceleration sensors return sensor events for all 3 axes at a constant - * rate defined by setDelay(). - * - * x: Acceleration on the x-axis - * y: Acceleration on the y-axis - * z: Acceleration on the z-axis - * - * Note that the readings from the accelerometer include the acceleration - * due to gravity (which is opposite to the direction of the gravity vector). - * - * Examples: - * The norm of <x, y, z> should be close to 0 when in free fall. - * - * When the device lies flat on a table and is pushed on its left side - * toward the right, the x acceleration value is positive. - * - * When the device lies flat on a table, the acceleration value is +9.81, - * which correspond to the acceleration of the device (0 m/s^2) minus the - * force of gravity (-9.81 m/s^2). - * - * When the device lies flat on a table and is pushed toward the sky, the - * acceleration value is greater than +9.81, which correspond to the - * acceleration of the device (+A m/s^2) minus the force of - * gravity (-9.81 m/s^2). */ #define SENSOR_TYPE_ACCELEROMETER (1) #define SENSOR_STRING_TYPE_ACCELEROMETER "android.sensor.accelerometer" @@ -313,12 +209,6 @@ enum { * All values are in micro-Tesla (uT) and measure the geomagnetic * field in the X, Y and Z axis. 
* - * Returned values include calibration mechanisms such that the vector is - * aligned with the magnetic declination and heading of the earth's - * geomagnetic field. - * - * Magnetic Field sensors return sensor events for all 3 axes at a constant - * rate defined by setDelay(). */ #define SENSOR_TYPE_GEOMAGNETIC_FIELD (2) #define SENSOR_TYPE_MAGNETIC_FIELD SENSOR_TYPE_GEOMAGNETIC_FIELD @@ -328,39 +218,11 @@ enum { * SENSOR_TYPE_ORIENTATION * trigger-mode: continuous * wake-up sensor: no - * + * * All values are angles in degrees. - * + * * Orientation sensors return sensor events for all 3 axes at a constant * rate defined by setDelay(). - * - * azimuth: angle between the magnetic north direction and the Y axis, around - * the Z axis (0<=azimuth<360). - * 0=North, 90=East, 180=South, 270=West - * - * pitch: Rotation around X axis (-180<=pitch<=180), with positive values when - * the z-axis moves toward the y-axis. - * - * roll: Rotation around Y axis (-90<=roll<=90), with positive values when - * the x-axis moves towards the z-axis. - * - * Note: For historical reasons the roll angle is positive in the clockwise - * direction (mathematically speaking, it should be positive in the - * counter-clockwise direction): - * - * Z - * ^ - * (+roll) .--> | - * / | - * | | roll: rotation around Y axis - * X <-------(.) - * Y - * note that +Y == -roll - * - * - * - * Note: This definition is different from yaw, pitch and roll used in aviation - * where the X axis is along the long side of the plane (tail to nose). */ #define SENSOR_TYPE_ORIENTATION (3) #define SENSOR_STRING_TYPE_ORIENTATION "android.sensor.orientation" @@ -371,17 +233,7 @@ enum { * wake-up sensor: no * * All values are in radians/second and measure the rate of rotation - * around the X, Y and Z axis. The coordinate system is the same as is - * used for the acceleration sensor. Rotation is positive in the - * counter-clockwise direction (right-hand rule). 
That is, an observer - * looking from some positive location on the x, y or z axis at a device - * positioned on the origin would report positive rotation if the device - * appeared to be rotating counter clockwise. Note that this is the - * standard mathematical definition of positive rotation and does not agree - * with the definition of roll given earlier. - * The range should at least be 17.45 rad/s (ie: ~1000 deg/s). - * - * automatic gyro-drift compensation is allowed but not required. + * around the X, Y and Z axis. */ #define SENSOR_TYPE_GYROSCOPE (4) #define SENSOR_STRING_TYPE_GYROSCOPE "android.sensor.gyroscope" @@ -413,12 +265,9 @@ enum { /* * SENSOR_TYPE_PROXIMITY * trigger-mode: on-change - * wake-up sensor: yes + * wake-up sensor: yes (set SENSOR_FLAG_WAKE_UP flag) * - * The distance value is measured in centimeters. Note that some proximity - * sensors only support a binary "close" or "far" measurement. In this case, - * the sensor should report its maxRange value in the "far" state and a value - * less than maxRange in the "near" state. + * The value corresponds to the distance to the nearest object in centimeters. */ #define SENSOR_TYPE_PROXIMITY (8) #define SENSOR_STRING_TYPE_PROXIMITY "android.sensor.proximity" @@ -429,10 +278,7 @@ enum { * wake-up sensor: no * * A gravity output indicates the direction of and magnitude of gravity in - * the devices's coordinates. On Earth, the magnitude is 9.8 m/s^2. - * Units are m/s^2. The coordinate system is the same as is used for the - * acceleration sensor. When the device is at rest, the output of the - * gravity sensor should be identical to that of the accelerometer. + * the devices's coordinates. */ #define SENSOR_TYPE_GRAVITY (9) #define SENSOR_STRING_TYPE_GRAVITY "android.sensor.gravity" @@ -444,13 +290,6 @@ enum { * * Indicates the linear acceleration of the device in device coordinates, * not including gravity. 
- * - * The output is conceptually: - * output of TYPE_ACCELERATION - output of TYPE_GRAVITY - * - * Readings on all axes should be close to 0 when device lies on a table. - * Units are m/s^2. - * The coordinate system is the same as is used for the acceleration sensor. */ #define SENSOR_TYPE_LINEAR_ACCELERATION (10) #define SENSOR_STRING_TYPE_LINEAR_ACCELERATION "android.sensor.linear_acceleration" @@ -462,46 +301,7 @@ enum { * wake-up sensor: no * * The rotation vector symbolizes the orientation of the device relative to the - * East-North-Up coordinates frame. It is usually obtained by integration of - * accelerometer, gyroscope and magnetometer readings. - * - * The East-North-Up coordinate system is defined as a direct orthonormal basis - * where: - * - X points east and is tangential to the ground. - * - Y points north and is tangential to the ground. - * - Z points towards the sky and is perpendicular to the ground. - * - * The orientation of the phone is represented by the rotation necessary to - * align the East-North-Up coordinates with the phone's coordinates. That is, - * applying the rotation to the world frame (X,Y,Z) would align them with the - * phone coordinates (x,y,z). - * - * The rotation can be seen as rotating the phone by an angle theta around - * an axis rot_axis to go from the reference (East-North-Up aligned) device - * orientation to the current device orientation. - * - * The rotation is encoded as the 4 (reordered) components of a unit quaternion: - * sensors_event_t.data[0] = rot_axis.x*sin(theta/2) - * sensors_event_t.data[1] = rot_axis.y*sin(theta/2) - * sensors_event_t.data[2] = rot_axis.z*sin(theta/2) - * sensors_event_t.data[3] = cos(theta/2) - * where - * - rot_axis.x,y,z are the North-East-Up coordinates of a unit length vector - * representing the rotation axis - * - theta is the rotation angle - * - * The quaternion must be of norm 1 (it is a unit quaternion). Failure to ensure - * this will cause erratic client behaviour. 
- * - * In addition, this sensor reports an estimated heading accuracy. - * sensors_event_t.data[4] = estimated_accuracy (in radians) - * The heading error must be less than estimated_accuracy 95% of the time - * - * This sensor must use a gyroscope and an accelerometer as main orientation - * change input. - * - * This sensor can also include magnetometer input to make up for gyro drift, - * but it cannot be implemented using only a magnetometer. + * East-North-Up coordinates frame. */ #define SENSOR_TYPE_ROTATION_VECTOR (11) #define SENSOR_STRING_TYPE_ROTATION_VECTOR "android.sensor.rotation_vector" @@ -534,35 +334,6 @@ enum { * * Similar to SENSOR_TYPE_MAGNETIC_FIELD, but the hard iron calibration is * reported separately instead of being included in the measurement. - * Factory calibration and temperature compensation should still be applied to - * the "uncalibrated" measurement. - * Separating away the hard iron calibration estimation allows the system to - * better recover from bad hard iron estimation. - * - * All values are in micro-Tesla (uT) and measure the ambient magnetic - * field in the X, Y and Z axis. Assumptions that the the magnetic field - * is due to the Earth's poles should be avoided. - * - * The uncalibrated_magnetic event contains - * - 3 fields for uncalibrated measurement: x_uncalib, y_uncalib, z_uncalib. - * Each is a component of the measured magnetic field, with soft iron - * and temperature compensation applied, but not hard iron calibration. - * These values should be continuous (no re-calibration should cause a jump). - * - 3 fields for hard iron bias estimates: x_bias, y_bias, z_bias. - * Each field is a component of the estimated hard iron calibration. - * They represent the offsets to apply to the calibrated readings to obtain - * uncalibrated readings (x_uncalib ~= x_calibrated + x_bias) - * These values are expected to jump as soon as the estimate of the hard iron - * changes, and they should be stable the rest of the time. 
- * - * If this sensor is present, then the corresponding - * SENSOR_TYPE_MAGNETIC_FIELD must be present and both must return the - * same sensor_t::name and sensor_t::vendor. - * - * Minimum filtering should be applied to this sensor. In particular, low pass - * filters should be avoided. - * - * See SENSOR_TYPE_MAGNETIC_FIELD for more information */ #define SENSOR_TYPE_MAGNETIC_FIELD_UNCALIBRATED (14) #define SENSOR_STRING_TYPE_MAGNETIC_FIELD_UNCALIBRATED "android.sensor.magnetic_field_uncalibrated" @@ -573,21 +344,7 @@ enum { * wake-up sensor: no * * Similar to SENSOR_TYPE_ROTATION_VECTOR, but not using the geomagnetic - * field. Therefore the Y axis doesn't point north, but instead to some other - * reference. That reference is allowed to drift by the same order of - * magnitude than the gyroscope drift around the Z axis. - * - * This sensor does not report an estimated heading accuracy: - * sensors_event_t.data[4] is reserved and should be set to 0 - * - * In the ideal case, a phone rotated and returning to the same real-world - * orientation should report the same game rotation vector - * (without using the earth's geomagnetic field). - * - * This sensor must be based on a gyroscope. It cannot be implemented using - * a magnetometer. - * - * see SENSOR_TYPE_ROTATION_VECTOR for more details + * field. */ #define SENSOR_TYPE_GAME_ROTATION_VECTOR (15) #define SENSOR_STRING_TYPE_GAME_ROTATION_VECTOR "android.sensor.game_rotation_vector" @@ -598,92 +355,19 @@ enum { * wake-up sensor: no * * All values are in radians/second and measure the rate of rotation - * around the X, Y and Z axis. An estimation of the drift on each axis is - * reported as well. - * - * No gyro-drift compensation shall be performed. - * Factory calibration and temperature compensation should still be applied - * to the rate of rotation (angular speeds). - * - * The coordinate system is the same as is - * used for the acceleration sensor. 
Rotation is positive in the - * counter-clockwise direction (right-hand rule). That is, an observer - * looking from some positive location on the x, y or z axis at a device - * positioned on the origin would report positive rotation if the device - * appeared to be rotating counter clockwise. Note that this is the - * standard mathematical definition of positive rotation and does not agree - * with the definition of roll given earlier. - * The range should at least be 17.45 rad/s (ie: ~1000 deg/s). - * - * Content of an uncalibrated_gyro event: (units are rad/sec) - * x_uncalib : angular speed (w/o drift compensation) around the X axis - * y_uncalib : angular speed (w/o drift compensation) around the Y axis - * z_uncalib : angular speed (w/o drift compensation) around the Z axis - * x_bias : estimated drift around X axis in rad/s - * y_bias : estimated drift around Y axis in rad/s - * z_bias : estimated drift around Z axis in rad/s - * - * IMPLEMENTATION NOTES: - * - * If the implementation is not able to estimate the drift, then this - * sensor MUST NOT be reported by this HAL. Instead, the regular - * SENSOR_TYPE_GYROSCOPE is used without drift compensation. - * - * If this sensor is present, then the corresponding - * SENSOR_TYPE_GYROSCOPE must be present and both must return the - * same sensor_t::name and sensor_t::vendor. + * around the X, Y and Z axis. */ #define SENSOR_TYPE_GYROSCOPE_UNCALIBRATED (16) #define SENSOR_STRING_TYPE_GYROSCOPE_UNCALIBRATED "android.sensor.gyroscope_uncalibrated" - /* * SENSOR_TYPE_SIGNIFICANT_MOTION * trigger-mode: one-shot - * wake-up sensor: yes + * wake-up sensor: yes (set SENSOR_FLAG_WAKE_UP flag) * * A sensor of this type triggers an event each time significant motion * is detected and automatically disables itself. * The only allowed value to return is 1.0. - * - * A significant motion is a motion that might lead to a change in the user - * location. 
- * Examples of such motions are: - * walking, biking, sitting in a moving car, coach or train. - * Examples of situations that should not trigger significant motion: - * - phone in pocket and person is not moving - * - phone is on a table, even if the table shakes a bit due to nearby traffic - * or washing machine - * - * A note on false positive / false negative / power consumption tradeoff - * - The goal of this sensor is to save power. - * - Triggering an event when the user is not moving (false positive) is costly - * in terms of power, so it should be avoided. - * - Not triggering an event when the user is moving (false negative) is - * acceptable as long as it is not done repeatedly. If the user has been - * walking for 10 seconds, not triggering an event within those 10 seconds - * is not acceptable. - * - * IMPORTANT NOTE: this sensor type is very different from other types - * in that it must work when the screen is off without the need of - * holding a partial wake-lock and MUST allow the SoC to go into suspend. - * When significant motion is detected, the sensor must awaken the SoC and - * the event be reported. - * - * If a particular hardware cannot support this mode of operation then this - * sensor type MUST NOT be reported by the HAL. ie: it is not acceptable - * to "emulate" this sensor in the HAL. - * - * The whole point of this sensor type is to save power by keeping the - * SoC in suspend mode when the device is at rest. - * - * When the sensor is not activated, it must also be deactivated in the - * hardware: it must not wake up the SoC anymore, even in case of - * significant motion. - * - * setDelay() has no effect and is ignored. - * Once a "significant motion" event is returned, a sensor of this type - * must disables itself automatically, as if activate(..., 0) had been called. 
*/ #define SENSOR_TYPE_SIGNIFICANT_MOTION (17) @@ -695,21 +379,8 @@ enum { * wake-up sensor: no * * A sensor of this type triggers an event each time a step is taken - * by the user. The only allowed value to return is 1.0 and an event is - * generated for each step. Like with any other event, the timestamp - * indicates when the event (here the step) occurred, this corresponds to when - * the foot hit the ground, generating a high variation in acceleration. - * - * While this sensor operates, it shall not disrupt any other sensors, in - * particular, but not limited to, the accelerometer; which might very well - * be in use as well. - * - * This sensor must be low power. That is, if the step detection cannot be - * done in hardware, this sensor should not be defined. Also, when the - * step detector is activated and the accelerometer is not, only steps should - * trigger interrupts (not accelerometer data). - * - * setDelay() has no impact on this sensor type + * by the user. The only allowed value to return is 1.0 and an event + * is generated for each step. */ #define SENSOR_TYPE_STEP_DETECTOR (18) @@ -724,46 +395,6 @@ enum { * A sensor of this type returns the number of steps taken by the user since * the last reboot while activated. The value is returned as a uint64_t and is * reset to zero only on a system / android reboot. - * - * The timestamp of the event is set to the time when the first step - * for that event was taken. - * See SENSOR_TYPE_STEP_DETECTOR for the signification of the time of a step. - * - * The minimum size of the hardware's internal counter shall be 16 bits - * (this restriction is here to avoid too frequent wake-ups when the - * delay is very large). - * - * IMPORTANT NOTE: this sensor type is different from other types - * in that it must work when the screen is off without the need of - * holding a partial wake-lock and MUST allow the SoC to go into suspend. 
- * Unlike other sensors, while in suspend mode this sensor must stay active, - * no events are reported during that time but, steps continue to be - * accounted for; an event will be reported as soon as the SoC resumes if - * the timeout has expired. - * - * In other words, when the screen is off and the device allowed to - * go into suspend mode, we don't want to be woken up, regardless of the - * setDelay() value, but the steps shall continue to be counted. - * - * The driver must however ensure that the internal step count never - * overflows. It is allowed in this situation to wake the SoC up so the - * driver can do the counter maintenance. - * - * While this sensor operates, it shall not disrupt any other sensors, in - * particular, but not limited to, the accelerometer; which might very well - * be in use as well. - * - * If a particular hardware cannot support these modes of operation then this - * sensor type MUST NOT be reported by the HAL. ie: it is not acceptable - * to "emulate" this sensor in the HAL. - * - * This sensor must be low power. That is, if the step detection cannot be - * done in hardware, this sensor should not be defined. Also, when the - * step counter is activated and the accelerometer is not, only steps should - * trigger interrupts (not accelerometer data). - * - * The whole point of this sensor type is to save power by keeping the - * SoC in suspend mode when the device is at rest. */ #define SENSOR_TYPE_STEP_COUNTER (19) @@ -776,18 +407,6 @@ enum { * * Similar to SENSOR_TYPE_ROTATION_VECTOR, but using a magnetometer instead * of using a gyroscope. - * - * This sensor must be based on a magnetometer. It cannot be implemented using - * a gyroscope, and gyroscope input cannot be used by this sensor, as the - * goal of this sensor is to be low power. - * The accelerometer can be (and usually is) used. 
- * - * Just like SENSOR_TYPE_ROTATION_VECTOR, this sensor reports an estimated - * heading accuracy: - * sensors_event_t.data[4] = estimated_accuracy (in radians) - * The heading error must be less than estimated_accuracy 95% of the time - * - * see SENSOR_TYPE_ROTATION_VECTOR for more details */ #define SENSOR_TYPE_GEOMAGNETIC_ROTATION_VECTOR (20) #define SENSOR_STRING_TYPE_GEOMAGNETIC_ROTATION_VECTOR "android.sensor.geomagnetic_rotation_vector" @@ -813,6 +432,135 @@ enum { #define SENSOR_TYPE_HEART_RATE (21) #define SENSOR_STRING_TYPE_HEART_RATE "android.sensor.heart_rate" +/* + * SENSOR_TYPE_NON_WAKE_UP_PROXIMITY_SENSOR + * Same as proximity_sensor but does not wake up the AP from suspend mode. + * wake-up sensor: no + */ +#define SENSOR_TYPE_NON_WAKE_UP_PROXIMITY_SENSOR (22) +#define SENSOR_STRING_TYPE_NON_WAKE_UP_PROXIMITY_SENSOR "android.sensor.non_wake_up_proximity_sensor" + +/* + * The sensors below are wake_up variants of the base sensor types defined + * above. When registered in batch mode, these sensors will wake up the AP when + * their FIFOs are full or when the batch timeout expires. A separate FIFO has + * to be maintained for wake up sensors and non wake up sensors. The non wake-up + * sensors need to overwrite their FIFOs when they are full till the AP wakes up + * and the wake-up sensors will wake-up the AP when their FIFOs are full or when + * the batch timeout expires without losing events. + * Note: Sensors of type SENSOR_TYPE_PROXIMITY are also wake up sensors and + * should be treated as such. Wake-up one-shot sensors like SIGNIFICANT_MOTION + * cannot be batched, hence the text about batch above doesn't apply to them. + * + * Define these sensors only if: + * 1) batching is supported. + * 2) wake-up and non wake-up variants of each sensor can be activated at + * different rates. + * + * wake-up sensor: yes + * Set SENSOR_FLAG_WAKE_UP flag for all these sensors. 
+ */ +#define SENSOR_TYPE_WAKE_UP_ACCELEROMETER (23) +#define SENSOR_STRING_TYPE_WAKE_UP_ACCELEROMETER "android.sensor.wake_up_accelerometer" + +#define SENSOR_TYPE_WAKE_UP_MAGNETIC_FIELD (24) +#define SENSOR_STRING_TYPE_WAKE_UP_MAGNETIC_FIELD "android.sensor.wake_up_magnetic_field" + +#define SENSOR_TYPE_WAKE_UP_ORIENTATION (25) +#define SENSOR_STRING_TYPE_WAKE_UP_ORIENTATION "android.sensor.wake_up_orientation" + +#define SENSOR_TYPE_WAKE_UP_GYROSCOPE (26) +#define SENSOR_STRING_TYPE_WAKE_UP_GYROSCOPE "android.sensor.wake_up_gyroscope" + +#define SENSOR_TYPE_WAKE_UP_LIGHT (27) +#define SENSOR_STRING_TYPE_WAKE_UP_LIGHT "android.sensor.wake_up_light" + +#define SENSOR_TYPE_WAKE_UP_PRESSURE (28) +#define SENSOR_STRING_TYPE_WAKE_UP_PRESSURE "android.sensor.wake_up_pressure" + +#define SENSOR_TYPE_WAKE_UP_GRAVITY (29) +#define SENSOR_STRING_TYPE_WAKE_UP_GRAVITY "android.sensor.wake_up_gravity" + +#define SENSOR_TYPE_WAKE_UP_LINEAR_ACCELERATION (30) +#define SENSOR_STRING_TYPE_WAKE_UP_LINEAR_ACCELERATION "android.sensor.wake_up_linear_acceleration" + +#define SENSOR_TYPE_WAKE_UP_ROTATION_VECTOR (31) +#define SENSOR_STRING_TYPE_WAKE_UP_ROTATION_VECTOR "android.sensor.wake_up_rotation_vector" + +#define SENSOR_TYPE_WAKE_UP_RELATIVE_HUMIDITY (32) +#define SENSOR_STRING_TYPE_WAKE_UP_RELATIVE_HUMIDITY "android.sensor.wake_up_relative_humidity" + +#define SENSOR_TYPE_WAKE_UP_AMBIENT_TEMPERATURE (33) +#define SENSOR_STRING_TYPE_WAKE_UP_AMBIENT_TEMPERATURE "android.sensor.wake_up_ambient_temperature" + +#define SENSOR_TYPE_WAKE_UP_MAGNETIC_FIELD_UNCALIBRATED (34) +#define SENSOR_STRING_TYPE_WAKE_UP_MAGNETIC_FIELD_UNCALIBRATED "android.sensor.wake_up_magnetic_field_uncalibrated" + +#define SENSOR_TYPE_WAKE_UP_GAME_ROTATION_VECTOR (35) +#define SENSOR_STRING_TYPE_WAKE_UP_GAME_ROTATION_VECTOR "android.sensor.wake_up_game_rotation_vector" + +#define SENSOR_TYPE_WAKE_UP_GYROSCOPE_UNCALIBRATED (36) +#define SENSOR_STRING_TYPE_WAKE_UP_GYROSCOPE_UNCALIBRATED 
"android.sensor.wake_up_gyroscope_uncalibrated" + +#define SENSOR_TYPE_WAKE_UP_STEP_DETECTOR (37) +#define SENSOR_STRING_TYPE_WAKE_UP_STEP_DETECTOR "android.sensor.wake_up_step_detector" + +#define SENSOR_TYPE_WAKE_UP_STEP_COUNTER (38) +#define SENSOR_STRING_TYPE_WAKE_UP_STEP_COUNTER "android.sensor.wake_up_step_counter" + +#define SENSOR_TYPE_WAKE_UP_GEOMAGNETIC_ROTATION_VECTOR (39) +#define SENSOR_STRING_TYPE_WAKE_UP_GEOMAGNETIC_ROTATION_VECTOR "android.sensor.wake_up_geomagnetic_rotation_vector" + +#define SENSOR_TYPE_WAKE_UP_HEART_RATE (40) +#define SENSOR_STRING_TYPE_WAKE_UP_HEART_RATE "android.sensor.wake_up_heart_rate" + +/* + * SENSOR_TYPE_WAKE_UP_TILT_DETECTOR + * trigger-mode: special (setDelay has no impact) + * wake-up sensor: yes (set SENSOR_FLAG_WAKE_UP flag) + * + * A sensor of this type generates an event each time a tilt event is detected. A tilt event + * should be generated if the direction of the 2-seconds window average gravity changed by at least + * 35 degrees since the activation of the sensor. + * initial_estimated_gravity = average of accelerometer measurements over the first + * 1 second after activation. + * current_estimated_gravity = average of accelerometer measurements over the last 2 seconds. + * trigger when angle (initial_estimated_gravity, current_estimated_gravity) > 35 degrees + * + * Large accelerations without a change in phone orientation should not trigger a tilt event. + * For example, a sharp turn or strong acceleration while driving a car should not trigger a tilt + * event, even though the angle of the average acceleration might vary by more than 35 degrees. + * + * Typically, this sensor is implemented with the help of only an accelerometer. Other sensors can + * be used as well if they do not increase the power consumption significantly. This is a low power + * sensor that should allow the AP to go into suspend mode. Do not emulate this sensor in the HAL. 
+ * Like other wake up sensors, the driver is expected to a hold a wake_lock with a timeout of 200 ms + * while reporting this event. The only allowed return value is 1.0. + */ +#define SENSOR_TYPE_WAKE_UP_TILT_DETECTOR (41) +#define SENSOR_STRING_TYPE_WAKE_UP_TILT_DETECTOR "android.sensor.wake_up_tilt_detector" + +/* + * SENSOR_TYPE_WAKE_GESTURE + * trigger-mode: one-shot + * wake-up sensor: yes (set SENSOR_FLAG_WAKE_UP flag) + * + * A sensor enabling waking up the device based on a device specific motion. + * + * When this sensor triggers, the device behaves as if the power button was + * pressed, turning the screen on. This behavior (turning on the screen when + * this sensor triggers) might be deactivated by the user in the device + * settings. Changes in settings do not impact the behavior of the sensor: + * only whether the framework turns the screen on when it triggers. + * + * The actual gesture to be detected is not specified, and can be chosen by + * the manufacturer of the device. + * This sensor must be low power, as it is likely to be activated 24/7. + * The only allowed value to return is 1.0. + */ +#define SENSOR_TYPE_WAKE_GESTURE (42) +#define SENSOR_STRING_TYPE_WAKE_GESTURE "android.sensor.wake_gesture" + /** * Values returned by the accelerometer in various locations in the universe. * all values are in SI units (m/s^2) @@ -968,7 +716,11 @@ typedef struct sensors_event_t { uint64_t step_counter; } u64; }; - uint32_t reserved1[4]; + + /* Reserved flags for internal use. Set to zero. */ + uint32_t flags; + + uint32_t reserved1[3]; } sensors_event_t; @@ -1008,7 +760,7 @@ struct sensor_t { * must increase when the driver is updated in a way that changes the * output of this sensor. This is important for fused sensors when the * fusion algorithm is updated. - */ + */ int version; /* handle that identifies this sensors. 
This handle is used to reference @@ -1069,15 +821,40 @@ struct sensor_t { */ const char* requiredPermission; + /* This value is defined only for continuous mode sensors. It is the delay between two + * sensor events corresponding to the lowest frequency that this sensor supports. When + * lower frequencies are requested through batch()/setDelay() the events will be generated + * at this frequency instead. It can be used by the framework or applications to estimate + * when the batch FIFO may be full. + * NOTE: period_ns is in nanoseconds where as maxDelay/minDelay are in microseconds. + * continuous: maximum sampling period allowed in microseconds. + * on-change, one-shot, special : -1 + * Availability: SENSORS_DEVICE_API_VERSION_1_3 + */ + #ifdef __LP64__ + int64_t maxDelay; + #else + int32_t maxDelay; + #endif + + /* Flags for sensor. See SENSOR_FLAG_* above. */ + #ifdef __LP64__ + uint64_t flags; + #else + uint32_t flags; + #endif + /* reserved fields, must be zero */ - void* reserved[4]; + void* reserved[2]; }; /* * sensors_poll_device_t is used with SENSORS_DEVICE_API_VERSION_0_1 * and is present for backward binary and source compatibility. - * (see documentation of the hooks in struct sensors_poll_device_1 below) + * See the Sensors HAL interface section for complete descriptions of the + * following functions: + * http://source.android.com/devices/sensors/index.html#hal */ struct sensors_poll_device_t { struct hw_device_t common; @@ -1102,70 +879,26 @@ typedef struct sensors_poll_device_1 { struct { struct hw_device_t common; - /* Activate/de-activate one sensor. + /* Activate/de-activate one sensor. Return 0 on success, negative * * handle is the handle of the sensor to change. * enabled set to 1 to enable, or 0 to disable the sensor. * - * if enabled is set to 1, the sensor is activated even if - * setDelay() wasn't called before. In this case, a default rate - * should be used. 
- * - * unless otherwise noted in the sensor types definitions, an - * activated sensor never prevents the SoC to go into suspend - * mode; that is, the HAL shall not hold a partial wake-lock on - * behalf of applications. - * - * one-shot sensors de-activate themselves automatically upon - * receiving an event and they must still accept to be deactivated - * through a call to activate(..., ..., 0). - * - * if "enabled" is 1 and the sensor is already activated, this - * function is a no-op and succeeds. - * - * if "enabled" is 0 and the sensor is already de-activated, - * this function is a no-op and succeeds. - * - * return 0 on success, negative errno code otherwise + * Return 0 on success, negative errno code otherwise. */ int (*activate)(struct sensors_poll_device_t *dev, int handle, int enabled); /** - * Set the events's period in nanoseconds for a given sensor. - * - * What the period_ns parameter means depends on the specified - * sensor's trigger mode: - * - * continuous: setDelay() sets the sampling rate. - * on-change: setDelay() limits the delivery rate of events - * one-shot: setDelay() is ignored. it has no effect. - * special: see specific sensor type definitions - * - * For continuous and on-change sensors, if the requested value is - * less than sensor_t::minDelay, then it's silently clamped to - * sensor_t::minDelay unless sensor_t::minDelay is 0, in which - * case it is clamped to >= 1ms. - * - * setDelay will not be called when the sensor is in batching mode. - * In this case, batch() will be called with the new period. - * - * @return 0 if successful, < 0 on error + * Set the events's period in nanoseconds for a given sensor. If + * period_ns > max_delay it will be truncated to max_delay and if + * period_ns < min_delay it will be replaced by min_delay. */ int (*setDelay)(struct sensors_poll_device_t *dev, int handle, int64_t period_ns); /** * Returns an array of sensor data. - * This function must block until events are available. 
- * - * return the number of events read on success, or -errno in case - * of an error. - * - * The number of events returned in data must be less or equal - * to the "count" argument. - * - * This function shall never return 0 (no event). */ int (*poll)(struct sensors_poll_device_t *dev, sensors_event_t* data, int count); @@ -1174,200 +907,9 @@ typedef struct sensors_poll_device_1 { /* - * Enables batch mode for the given sensor and sets the delay between events - * - * A timeout value of zero disables batch mode for the given sensor. - * - * The period_ns parameter is equivalent to calling setDelay() -- this - * function both enables or disables the batch mode AND sets the events's - * period in nanosecond. See setDelay() above for a detailed explanation of - * the period_ns parameter. - * - * BATCH MODE: - * ----------- - * In non-batch mode, all sensor events must be reported as soon as they - * are detected. For example, an accelerometer activated at 50Hz will - * trigger interrupts 50 times per second. - * While in batch mode, sensor events do not need to be reported as soon - * as they are detected. They can be temporarily stored in batches and - * reported in batches, as long as no event is delayed by more than - * "timeout" nanoseconds. That is, all events since the previous batch - * are recorded and returned all at once. This allows to reduce the amount - * of interrupts sent to the SoC, and allow the SoC to switch to a lower - * power state (Idle) while the sensor is capturing and batching data. - * - * setDelay() is not affected and it behaves as usual. - * - * Each event has a timestamp associated with it, the timestamp - * must be accurate and correspond to the time at which the event - * physically happened. - * - * Batching does not modify the behavior of poll(): batches from different - * sensors can be interleaved and split. As usual, all events from the same - * sensor are time-ordered. 
- * - * BEHAVIOUR OUTSIDE OF SUSPEND MODE: - * ---------------------------------- - * - * When the SoC is awake (not in suspend mode), events must be reported in - * batches at least every "timeout". No event shall be dropped or lost. - * If internal h/w FIFOs fill-up before the timeout, then events are - * reported at that point to ensure no event is lost. - * - * - * NORMAL BEHAVIOR IN SUSPEND MODE: - * --------------------------------- - * - * By default, batch mode doesn't significantly change the interaction with - * suspend mode. That is, sensors must continue to allow the SoC to - * go into suspend mode and sensors must stay active to fill their - * internal FIFO. In this mode, when the FIFO fills up, it shall wrap - * around (basically behave like a circular buffer, overwriting events). - * As soon as the SoC comes out of suspend mode, a batch is produced with - * as much as the recent history as possible, and batch operation - * resumes as usual. - * - * The behavior described above allows applications to record the recent - * history of a set of sensor while keeping the SoC into suspend. It - * also allows the hardware to not have to rely on a wake-up interrupt line. - * - * WAKE_UPON_FIFO_FULL BEHAVIOR IN SUSPEND MODE: - * ---------------------------------------------- - * - * There are cases, however, where an application cannot afford to lose - * any events, even when the device goes into suspend mode. - * For a given rate, if a sensor has the capability to store at least 10 - * seconds worth of events in its FIFO and is able to wake up the Soc, it - * can implement an optional secondary mode: the WAKE_UPON_FIFO_FULL mode. - * - * The caller will set the SENSORS_BATCH_WAKE_UPON_FIFO_FULL flag to - * activate this mode. If the sensor does not support this mode, batch() - * will fail when the flag is set. - * - * When running with the WAKE_UPON_FIFO_FULL flag set, no events can be - * lost. 
When the FIFO is getting full, the sensor must wake up the SoC from - * suspend and return a batch before the FIFO fills-up. - * Depending on the device, it might take a few miliseconds for the SoC to - * entirely come out of suspend and start flushing the FIFO. Enough head - * room must be allocated in the FIFO to allow the device to entirely come - * out of suspend without the FIFO overflowing (no events shall be lost). - * - * Implementing the WAKE_UPON_FIFO_FULL mode is optional. - * If the hardware cannot support this mode, or if the physical - * FIFO is so small that the device would never be allowed to go into - * suspend for at least 10 seconds, then this function MUST fail when - * the flag SENSORS_BATCH_WAKE_UPON_FIFO_FULL is set, regardless of - * the value of the timeout parameter. - * - * - * DRY RUN: - * -------- - * - * If the flag SENSORS_BATCH_DRY_RUN is set, this function returns - * without modifying the batch mode or the event period and has no side - * effects, but returns errors as usual (as it would if this flag was - * not set). This flag is used to check if batch mode is available for a - * given configuration -- in particular for a given sensor at a given rate. - * - * - * Return values: - * -------------- - * - * Because sensors must be independent, the return value must not depend - * on the state of the system (whether another sensor is on or not), - * nor on whether the flag SENSORS_BATCH_DRY_RUN is set (in other words, - * if a batch call with SENSORS_BATCH_DRY_RUN is successful, - * the same call without SENSORS_BATCH_DRY_RUN must succeed as well). - * - * When timeout is not 0: - * If successful, 0 is returned. - * If the specified sensor doesn't support batch mode, return -EINVAL. - * If the specified sensor's trigger-mode is one-shot, return -EINVAL. 
- * If WAKE_UPON_FIFO_FULL is specified and the specified sensor's internal - * FIFO is too small to store at least 10 seconds worth of data at the - * given rate, -EINVAL is returned. Note that as stated above, this has to - * be determined at compile time, and not based on the state of the - * system. - * If some other constraints above cannot be satisfied, return -EINVAL. - * - * Note: the timeout parameter, when > 0, has no impact on whether this - * function succeeds or fails. - * - * When timeout is 0: - * The caller will never set the wake_upon_fifo_full flag. - * The function must succeed, and batch mode must be deactivated. - * - * Independently of whether DRY_RUN is specified, When the call to batch() - * fails, no state should be changed. In particular, a failed call to - * batch() should not change the rate of the sensor. Example: - * setDelay(..., 10ms) - * batch(..., 20ms, ...) fails - * rate should stay 10ms. - * - * - * IMPLEMENTATION NOTES: - * --------------------- - * - * Batch mode, if supported, should happen at the hardware level, - * typically using hardware FIFOs. In particular, it SHALL NOT be - * implemented in the HAL, as this would be counter productive. - * The goal here is to save significant amounts of power. - * - * In some implementations, events from several sensors can share the - * same physical FIFO. In that case, all events in the FIFO can be sent and - * processed by the HAL as soon as one batch must be reported. - * For example, if the following sensors are activated: - * - accelerometer batched with timeout = 20s - * - gyroscope batched with timeout = 5s - * then the accelerometer batches can be reported at the same time the - * gyroscope batches are reported (every 5 seconds) - * - * Batch mode can be enabled or disabled at any time, in particular - * while the specified sensor is already enabled, and this shall not - * result in the loss of events. 
- * - * COMPARATIVE IMPORTANCE OF BATCHING FOR DIFFERENT SENSORS: - * --------------------------------------------------------- - * - * On platforms on which hardware fifo size is limited, the system designers - * might have to choose how much fifo to reserve for each sensor. To help - * with this choice, here is a list of applications made possible when - * batching is implemented on the different sensors. - * - * High value: Low power pedestrian dead reckoning - * Target batching time: 20 seconds to 1 minute - * Sensors to batch: - * - Step detector - * - Rotation vector or game rotation vector at 5Hz - * Gives us step and heading while letting the SoC go to Suspend. - * - * High value: Medium power activity/gesture recognition - * Target batching time: 3 seconds - * Sensors to batch: accelerometer between 20Hz and 50Hz - * Allows recognizing arbitrary activities and gestures without having - * to keep the SoC fully awake while the data is collected. - * - * Medium-high value: Interrupt load reduction - * Target batching time: < 1 second - * Sensors to batch: any high frequency sensor. - * If the gyroscope is set at 800Hz, even batching just 10 gyro events can - * reduce the number of interrupts from 800/second to 80/second. - * - * Medium value: Continuous low frequency data collection - * Target batching time: > 1 minute - * Sensors to batch: barometer, humidity sensor, other low frequency - * sensors. - * Allows creating monitoring applications at low power. - * - * Medium value: Continuous full-sensors collection - * Target batching time: > 1 minute - * Sensors to batch: all, at high frequencies - * Allows full collection of sensor data while leaving the SoC in - * suspend mode. Only to consider if fifo space is not an issue. - * - * In each of the cases above, if WAKE_UPON_FIFO_FULL is implemented, the - * applications might decide to let the SoC go to suspend, allowing for even - * more power savings. 
+ * Enables batch mode for the given sensor and sets the delay between events. + * See the Batching sensor results page for details: + * http://source.android.com/devices/sensors/batching.html */ int (*batch)(struct sensors_poll_device_1* dev, int handle, int flags, int64_t period_ns, int64_t timeout); @@ -1375,29 +917,7 @@ typedef struct sensors_poll_device_1 { /* * Flush adds a META_DATA_FLUSH_COMPLETE event (sensors_event_meta_data_t) * to the end of the "batch mode" FIFO for the specified sensor and flushes - * the FIFO; those events are delivered as usual (i.e.: as if the batch - * timeout had expired) and removed from the FIFO. - * - * See the META_DATA_FLUSH_COMPLETE section for details about the - * META_DATA_FLUSH_COMPLETE event. - * - * The flush happens asynchronously (i.e.: this function must return - * immediately). - * - * If the implementation uses a single FIFO for several sensors, that - * FIFO is flushed and the META_DATA_FLUSH_COMPLETE event is added only - * for the specified sensor. - * - * If the specified sensor wasn't in batch mode, flush succeeds and - * promptly sends a META_DATA_FLUSH_COMPLETE event for that sensor. - * - * If the FIFO was empty at the time of the call, flush returns - * 0 (success) and promptly sends a META_DATA_FLUSH_COMPLETE event - * for that sensor. - * - * If the specified sensor wasn't enabled, flush returns -EINVAL. - * - * return 0 on success, negative errno code otherwise. + * the FIFO. */ int (*flush)(struct sensors_poll_device_1* dev, int handle); diff --git a/include/hardware/tv_input.h b/include/hardware/tv_input.h new file mode 100644 index 0000000..f2d03f1 --- /dev/null +++ b/include/hardware/tv_input.h @@ -0,0 +1,333 @@ +/* + * Copyright 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_TV_INPUT_INTERFACE_H +#define ANDROID_TV_INPUT_INTERFACE_H + +#include <stdint.h> +#include <sys/cdefs.h> +#include <sys/types.h> + +#include <hardware/hardware.h> +#include <system/window.h> + +__BEGIN_DECLS + +/* + * Module versioning information for the TV input hardware module, based on + * tv_input_module_t.common.module_api_version. + * + * Version History: + * + * TV_INPUT_MODULE_API_VERSION_0_1: + * Initial TV input hardware module API. + * + */ + +#define TV_INPUT_MODULE_API_VERSION_0_1 HARDWARE_MODULE_API_VERSION(0, 1) + +#define TV_INPUT_DEVICE_API_VERSION_0_1 HARDWARE_DEVICE_API_VERSION(0, 1) + +/* + * The id of this module + */ +#define TV_INPUT_HARDWARE_MODULE_ID "tv_input" + +#define TV_INPUT_DEFAULT_DEVICE "default" + +/*****************************************************************************/ + +/* + * Every hardware module must have a data structure named HAL_MODULE_INFO_SYM + * and the fields of this data structure must begin with hw_module_t + * followed by module specific information. + */ +typedef struct tv_input_module { + struct hw_module_t common; +} tv_input_module_t; + +/*****************************************************************************/ + +typedef enum tv_input_type { + /* HDMI */ + TV_INPUT_TYPE_HDMI = 1, + + /* Built-in tuners. */ + TV_INPUT_TYPE_BUILT_IN_TUNER = 2, + + /* Passthrough */ + TV_INPUT_TYPE_PASSTHROUGH = 3, +} tv_input_type_t; + +typedef struct tv_input_device_info { + /* Device ID */ + int device_id; + + /* Type of physical TV input. 
*/ + tv_input_type_t type; + + /* + * TODO: A union of type specific information. For example, HDMI port + * identifier that HDMI hardware understands. + */ + + /* TODO: Add capability if necessary. */ + + /* TODO: Audio info */ +} tv_input_device_info_t; + +typedef enum { + /* + * Hardware notifies the framework that a device is available. + */ + TV_INPUT_EVENT_DEVICE_AVAILABLE = 1, + /* + * Hardware notifies the framework that a device is unavailable. + */ + TV_INPUT_EVENT_DEVICE_UNAVAILABLE = 2, + /* + * Stream configurations are changed. Client should regard all open streams + * at the specific device are closed, and should call + * get_stream_configurations() again, opening some of them if necessary. + */ + TV_INPUT_EVENT_STREAM_CONFIGURATIONS_CHANGED = 3, + /* + * Hardware is done with capture request with the buffer. Client can assume + * ownership of the buffer again. + */ + TV_INPUT_EVENT_CAPTURE_SUCCEEDED = 4, + /* + * Hardware met a failure while processing a capture request or client + * canceled the request. Client can assume ownership of the buffer again. + */ + TV_INPUT_EVENT_CAPTURE_FAILED = 5, +} tv_input_event_type_t; + +typedef struct tv_input_capture_result { + /* Device ID */ + int device_id; + + /* Stream ID */ + int stream_id; + + /* Sequence number of the request */ + uint32_t seq; + + /* + * The buffer passed to hardware in request_capture(). The content of + * buffer is undefined (although buffer itself is valid) for + * TV_INPUT_CAPTURE_FAILED event. + */ + buffer_handle_t buffer; + + /* + * Error code for the request. -ECANCELED if request is cancelled; other + * error codes are unknown errors. 
+ */ + int error_code; +} tv_input_capture_result_t; + +typedef struct tv_input_event { + tv_input_event_type_t type; + + union { + /* + * TV_INPUT_EVENT_DEVICE_AVAILABLE: all fields are relevant + * TV_INPUT_EVENT_DEVICE_UNAVAILABLE: only device_id is relevant + * TV_INPUT_EVENT_STREAM_CONFIGURATIONS_CHANGED: only device_id is + * relevant + */ + tv_input_device_info_t device_info; + /* + * TV_INPUT_EVENT_CAPTURE_SUCCEEDED: error_code is not relevant + * TV_INPUT_EVENT_CAPTURE_FAILED: all fields are relevant + */ + tv_input_capture_result_t capture_result; + }; +} tv_input_event_t; + +typedef struct tv_input_callback_ops { + /* + * event contains the type of the event and additional data if necessary. + * The event object is guaranteed to be valid only for the duration of the + * call. + * + * data is an object supplied at device initialization, opaque to the + * hardware. + */ + void (*notify)(struct tv_input_device* dev, + tv_input_event_t* event, void* data); +} tv_input_callback_ops_t; + +typedef enum { + TV_STREAM_TYPE_INDEPENDENT_VIDEO_SOURCE = 1, + TV_STREAM_TYPE_BUFFER_PRODUCER = 2, +} tv_stream_type_t; + +typedef struct tv_stream_config { + /* + * ID number of the stream. This value is used to identify the whole stream + * configuration. + */ + int stream_id; + + /* Type of the stream */ + tv_stream_type_t type; + + /* Max width/height of the stream. */ + uint32_t max_video_width; + uint32_t max_video_height; +} tv_stream_config_t; + +typedef struct buffer_producer_stream { + /* + * IN/OUT: Width / height of the stream. Client may request for specific + * size but hardware may change it. Client must allocate buffers with + * specified width and height. + */ + uint32_t width; + uint32_t height; + + /* OUT: Client must set this usage when allocating buffer. */ + uint32_t usage; + + /* OUT: Client must allocate a buffer with this format. 
*/ + uint32_t format; +} buffer_producer_stream_t; + +typedef struct tv_stream { + /* IN: ID in the stream configuration */ + int stream_id; + + /* OUT: Type of the stream (for convenience) */ + tv_stream_type_t type; + + /* Data associated with the stream for client's use */ + union { + /* OUT: A native handle describing the sideband stream source */ + native_handle_t* sideband_stream_source_handle; + + /* IN/OUT: Details are in buffer_producer_stream_t */ + buffer_producer_stream_t buffer_producer; + }; +} tv_stream_t; + +/* + * Every device data structure must begin with hw_device_t + * followed by module specific public methods and attributes. + */ +typedef struct tv_input_device { + struct hw_device_t common; + + /* + * initialize: + * + * Provide callbacks to the device and start operation. At first, no device + * is available and after initialize() completes, currently available + * devices including static devices should notify via callback. + * + * Framework owns callbacks object. + * + * data is a framework-owned object which would be sent back to the + * framework for each callback notifications. + * + * Return 0 on success. + */ + int (*initialize)(struct tv_input_device* dev, + const tv_input_callback_ops_t* callback, void* data); + + /* + * get_stream_configurations: + * + * Get stream configurations for a specific device. An input device may have + * multiple configurations. + * + * The configs object is guaranteed to be valid only until the next call to + * get_stream_configurations() or STREAM_CONFIGURATIONS_CHANGED event. + * + * Return 0 on success. + */ + int (*get_stream_configurations)(const struct tv_input_device* dev, + int device_id, int* num_configurations, + const tv_stream_config_t** configs); + + /* + * open_stream: + * + * Open a stream with given stream ID. Caller owns stream object, and the + * populated data is only valid until the stream is closed. 
+ * + * Return 0 on success; -EBUSY if the client should close other streams to + * open the stream; -EEXIST if the stream with the given ID is already open; + * -EINVAL if device_id and/or stream_id are invalid; other non-zero value + * denotes unknown error. + */ + int (*open_stream)(struct tv_input_device* dev, int device_id, + tv_stream_t* stream); + + /* + * close_stream: + * + * Close a stream to a device. data in tv_stream_t* object associated with + * the stream_id is obsolete once this call finishes. + * + * Return 0 on success; -ENOENT if the stream is not open; -EINVAL if + * device_id and/or stream_id are invalid. + */ + int (*close_stream)(struct tv_input_device* dev, int device_id, + int stream_id); + + /* + * request_capture: + * + * Request buffer capture for a stream. This is only valid for buffer + * producer streams. The buffer should be created with size, format and + * usage specified in the stream. Framework provides seq in an + * increasing sequence per each stream. Hardware should provide the picture + * in a chronological order according to seq. For example, if two + * requests are being processed at the same time, the request with the + * smaller seq should get an earlier frame. + * + * The framework releases the ownership of the buffer upon calling this + * function. When the buffer is filled, hardware notifies the framework + * via TV_INPUT_EVENT_CAPTURE_SUCCEEDED callback, and the ownership is + * transferred back to framework at that time. + * + * Return 0 on success; -ENOENT if the stream is not open; -EINVAL if + * device_id and/or stream_id are invalid; -EWOULDBLOCK if HAL cannot take + * additional requests until it releases a buffer. + */ + int (*request_capture)(struct tv_input_device* dev, int device_id, + int stream_id, buffer_handle_t buffer, uint32_t seq); + + /* + * cancel_capture: + * + * Cancel an ongoing capture. Hardware should release the buffer as soon as + * possible via TV_INPUT_EVENT_CAPTURE_FAILED callback.
+ * + * Return 0 on success; -ENOENT if the stream is not open; -EINVAL if + * device_id, stream_id, and/or seq are invalid. + */ + int (*cancel_capture)(struct tv_input_device* dev, int device_id, + int stream_id, uint32_t seq); + + void* reserved[16]; +} tv_input_device_t; + +__END_DECLS + +#endif // ANDROID_TV_INPUT_INTERFACE_H diff --git a/include/hardware/vibrator.h b/include/hardware/vibrator.h index 795d23e..92b1fd0 100644 --- a/include/hardware/vibrator.h +++ b/include/hardware/vibrator.h @@ -35,7 +35,13 @@ __BEGIN_DECLS struct vibrator_device; typedef struct vibrator_device { - struct hw_device_t common; + /** + * Common methods of the vibrator device. This *must* be the first member of + * vibrator_device as users of this structure will cast a hw_device_t to + * vibrator_device pointer in contexts where it's known the hw_device_t references a + * vibrator_device. + */ + struct hw_device_t common; /** Turn on vibrator * diff --git a/modules/Android.mk b/modules/Android.mk index f1a6c1c..c903eee 100644 --- a/modules/Android.mk +++ b/modules/Android.mk @@ -1,4 +1,4 @@ hardware_modules := gralloc hwcomposer audio nfc nfc-nci local_time \ power usbaudio audio_remote_submix camera consumerir sensors vibrator \ - mcu + mcu tv_input fingerprint include $(call all-named-subdir-makefiles,$(hardware_modules)) diff --git a/modules/audio_remote_submix/audio_hw.cpp b/modules/audio_remote_submix/audio_hw.cpp index 433ef6c..f11b207 100644 --- a/modules/audio_remote_submix/audio_hw.cpp +++ b/modules/audio_remote_submix/audio_hw.cpp @@ -20,49 +20,128 @@ #include <errno.h> #include <pthread.h> #include <stdint.h> -#include <sys/time.h> #include <stdlib.h> +#include <sys/param.h> +#include <sys/time.h> +#include <sys/limits.h> #include <cutils/log.h> -#include <cutils/str_parms.h> #include <cutils/properties.h> +#include <cutils/str_parms.h> +#include <hardware/audio.h> #include <hardware/hardware.h> #include <system/audio.h> -#include <hardware/audio.h> +#include 
<media/AudioParameter.h> +#include <media/AudioBufferProvider.h> #include <media/nbaio/MonoPipe.h> #include <media/nbaio/MonoPipeReader.h> -#include <media/AudioBufferProvider.h> #include <utils/String8.h> -#include <media/AudioParameter.h> + +#define LOG_STREAMS_TO_FILES 0 +#if LOG_STREAMS_TO_FILES +#include <fcntl.h> +#include <stdio.h> +#include <sys/stat.h> +#endif // LOG_STREAMS_TO_FILES extern "C" { namespace android { -#define MAX_PIPE_DEPTH_IN_FRAMES (1024*8) +// Set to 1 to enable extremely verbose logging in this module. +#define SUBMIX_VERBOSE_LOGGING 0 +#if SUBMIX_VERBOSE_LOGGING +#define SUBMIX_ALOGV(...) ALOGV(__VA_ARGS__) +#define SUBMIX_ALOGE(...) ALOGE(__VA_ARGS__) +#else +#define SUBMIX_ALOGV(...) +#define SUBMIX_ALOGE(...) +#endif // SUBMIX_VERBOSE_LOGGING + +// NOTE: This value will be rounded up to the nearest power of 2 by MonoPipe(). +#define DEFAULT_PIPE_SIZE_IN_FRAMES (1024*8) +// Value used to divide the MonoPipe() buffer into segments that are written to the source and +// read from the sink. The maximum latency of the device is the size of the MonoPipe's buffer +// the minimum latency is the MonoPipe buffer size divided by this value. +#define DEFAULT_PIPE_PERIOD_COUNT 4 // The duration of MAX_READ_ATTEMPTS * READ_ATTEMPT_SLEEP_MS must be stricly inferior to // the duration of a record buffer at the current record sample rate (of the device, not of // the recording itself). Here we have: // 3 * 5ms = 15ms < 1024 frames * 1000 / 48000 = 21.333ms #define MAX_READ_ATTEMPTS 3 #define READ_ATTEMPT_SLEEP_MS 5 // 5ms between two read attempts when pipe is empty -#define DEFAULT_RATE_HZ 48000 // default sample rate +#define DEFAULT_SAMPLE_RATE_HZ 48000 // default sample rate +// See NBAIO_Format frameworks/av/include/media/nbaio/NBAIO.h. 
+#define DEFAULT_FORMAT AUDIO_FORMAT_PCM_16_BIT +// A legacy user of this device does not close the input stream when it shuts down, which +// results in the application opening a new input stream before closing the old input stream +// handle it was previously using. Setting this value to 1 allows multiple clients to open +// multiple input streams from this device. If this option is enabled, each input stream returned +// is *the same stream* which means that readers will race to read data from these streams. +#define ENABLE_LEGACY_INPUT_OPEN 1 +// Whether channel conversion (16-bit signed PCM mono->stereo, stereo->mono) is enabled. +#define ENABLE_CHANNEL_CONVERSION 1 +// Whether resampling is enabled. +#define ENABLE_RESAMPLING 1 +#if LOG_STREAMS_TO_FILES +// Folder to save stream log files to. +#define LOG_STREAM_FOLDER "/data/misc/media" +// Log filenames for input and output streams. +#define LOG_STREAM_OUT_FILENAME LOG_STREAM_FOLDER "/r_submix_out.raw" +#define LOG_STREAM_IN_FILENAME LOG_STREAM_FOLDER "/r_submix_in.raw" +// File permissions for stream log files. +#define LOG_STREAM_FILE_PERMISSIONS (S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH) +#endif // LOG_STREAMS_TO_FILES + +// Common limits macros. +#ifndef min +#define min(a, b) ((a) < (b) ? (a) : (b)) +#endif // min +#ifndef max +#define max(a, b) ((a) > (b) ? (a) : (b)) +#endif // max + +// Set *result_variable_ptr to true if value_to_find is present in the array array_to_search, +// otherwise set *result_variable_ptr to false. +#define SUBMIX_VALUE_IN_SET(value_to_find, array_to_search, result_variable_ptr) \ + { \ + size_t i; \ + *(result_variable_ptr) = false; \ + for (i = 0; i < sizeof(array_to_search) / sizeof((array_to_search)[0]); i++) { \ + if ((value_to_find) == (array_to_search)[i]) { \ + *(result_variable_ptr) = true; \ + break; \ + } \ + } \ + } +// Configuration of the submix pipe. 
struct submix_config { - audio_format_t format; - audio_channel_mask_t channel_mask; - unsigned int rate; // sample rate for the device - unsigned int period_size; // size of the audio pipe is period_size * period_count in frames - unsigned int period_count; + // Channel mask field in this data structure is set to either input_channel_mask or + // output_channel_mask depending upon the last stream to be opened on this device. + struct audio_config common; + // Input stream and output stream channel masks. This is required since input and output + // channel bitfields are not equivalent. + audio_channel_mask_t input_channel_mask; + audio_channel_mask_t output_channel_mask; +#if ENABLE_RESAMPLING + // Input stream and output stream sample rates. + uint32_t input_sample_rate; + uint32_t output_sample_rate; +#endif // ENABLE_RESAMPLING + size_t pipe_frame_size; // Number of bytes in each audio frame in the pipe. + size_t buffer_size_frames; // Size of the audio pipe in frames. + // Maximum number of frames buffered by the input and output streams. + size_t buffer_period_size_frames; }; struct submix_audio_device { struct audio_hw_device device; - bool output_standby; bool input_standby; + bool output_standby; submix_config config; // Pipe variables: they handle the ring buffer that "pipes" audio: // - from the submix virtual audio output == what needs to be played @@ -72,16 +151,30 @@ struct submix_audio_device { // A usecase example is one where the component capturing the audio is then sending it over // Wifi for presentation on a remote Wifi Display device (e.g. a dongle attached to a TV, or a // TV with Wifi Display capabilities), or to a wireless audio player. - sp<MonoPipe> rsxSink; + sp<MonoPipe> rsxSink; sp<MonoPipeReader> rsxSource; - - // device lock, also used to protect access to the audio pipe +#if ENABLE_RESAMPLING + // Buffer used as temporary storage for resampled data prior to returning data to the output + // stream. 
+ int16_t resampler_buffer[DEFAULT_PIPE_SIZE_IN_FRAMES]; +#endif // ENABLE_RESAMPLING + + // Pointers to the current input and output stream instances. rsxSink and rsxSource are + // destroyed if both and input and output streams are destroyed. + struct submix_stream_out *output; + struct submix_stream_in *input; + + // Device lock, also used to protect access to submix_audio_device from the input and output + // streams. pthread_mutex_t lock; }; struct submix_stream_out { struct audio_stream_out stream; struct submix_audio_device *dev; +#if LOG_STREAMS_TO_FILES + int log_fd; +#endif // LOG_STREAMS_TO_FILES }; struct submix_stream_in { @@ -93,84 +186,445 @@ struct submix_stream_in { struct timespec record_start_time; // how many frames have been requested to be read int64_t read_counter_frames; + +#if ENABLE_LEGACY_INPUT_OPEN + // Number of references to this input stream. + volatile int32_t ref_count; +#endif // ENABLE_LEGACY_INPUT_OPEN +#if LOG_STREAMS_TO_FILES + int log_fd; +#endif // LOG_STREAMS_TO_FILES }; +// Determine whether the specified sample rate is supported by the submix module. +static bool sample_rate_supported(const uint32_t sample_rate) +{ + // Set of sample rates supported by Format_from_SR_C() frameworks/av/media/libnbaio/NAIO.cpp. + static const unsigned int supported_sample_rates[] = { + 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, + }; + bool return_value; + SUBMIX_VALUE_IN_SET(sample_rate, supported_sample_rates, &return_value); + return return_value; +} + +// Determine whether the specified sample rate is supported, if it is return the specified sample +// rate, otherwise return the default sample rate for the submix module. +static uint32_t get_supported_sample_rate(uint32_t sample_rate) +{ + return sample_rate_supported(sample_rate) ? sample_rate : DEFAULT_SAMPLE_RATE_HZ; +} + +// Determine whether the specified channel in mask is supported by the submix module. 
+static bool channel_in_mask_supported(const audio_channel_mask_t channel_in_mask) +{ + // Set of channel in masks supported by Format_from_SR_C() + // frameworks/av/media/libnbaio/NAIO.cpp. + static const audio_channel_mask_t supported_channel_in_masks[] = { + AUDIO_CHANNEL_IN_MONO, AUDIO_CHANNEL_IN_STEREO, + }; + bool return_value; + SUBMIX_VALUE_IN_SET(channel_in_mask, supported_channel_in_masks, &return_value); + return return_value; +} + +// Determine whether the specified channel in mask is supported, if it is return the specified +// channel in mask, otherwise return the default channel in mask for the submix module. +static audio_channel_mask_t get_supported_channel_in_mask( + const audio_channel_mask_t channel_in_mask) +{ + return channel_in_mask_supported(channel_in_mask) ? channel_in_mask : + static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_IN_STEREO); +} + +// Determine whether the specified channel out mask is supported by the submix module. +static bool channel_out_mask_supported(const audio_channel_mask_t channel_out_mask) +{ + // Set of channel out masks supported by Format_from_SR_C() + // frameworks/av/media/libnbaio/NAIO.cpp. + static const audio_channel_mask_t supported_channel_out_masks[] = { + AUDIO_CHANNEL_OUT_MONO, AUDIO_CHANNEL_OUT_STEREO, + }; + bool return_value; + SUBMIX_VALUE_IN_SET(channel_out_mask, supported_channel_out_masks, &return_value); + return return_value; +} + +// Determine whether the specified channel out mask is supported, if it is return the specified +// channel out mask, otherwise return the default channel out mask for the submix module. +static audio_channel_mask_t get_supported_channel_out_mask( + const audio_channel_mask_t channel_out_mask) +{ + return channel_out_mask_supported(channel_out_mask) ? channel_out_mask : + static_cast<audio_channel_mask_t>(AUDIO_CHANNEL_OUT_STEREO); +} + +// Get a pointer to submix_stream_out given an audio_stream_out that is embedded within the +// structure. 
+static struct submix_stream_out * audio_stream_out_get_submix_stream_out( + struct audio_stream_out * const stream) +{ + ALOG_ASSERT(stream); + return reinterpret_cast<struct submix_stream_out *>(reinterpret_cast<uint8_t *>(stream) - + offsetof(struct submix_stream_out, stream)); +} + +// Get a pointer to submix_stream_out given an audio_stream that is embedded within the structure. +static struct submix_stream_out * audio_stream_get_submix_stream_out( + struct audio_stream * const stream) +{ + ALOG_ASSERT(stream); + return audio_stream_out_get_submix_stream_out( + reinterpret_cast<struct audio_stream_out *>(stream)); +} + +// Get a pointer to submix_stream_in given an audio_stream_in that is embedded within the +// structure. +static struct submix_stream_in * audio_stream_in_get_submix_stream_in( + struct audio_stream_in * const stream) +{ + ALOG_ASSERT(stream); + return reinterpret_cast<struct submix_stream_in *>(reinterpret_cast<uint8_t *>(stream) - + offsetof(struct submix_stream_in, stream)); +} + +// Get a pointer to submix_stream_in given an audio_stream that is embedded within the structure. +static struct submix_stream_in * audio_stream_get_submix_stream_in( + struct audio_stream * const stream) +{ + ALOG_ASSERT(stream); + return audio_stream_in_get_submix_stream_in( + reinterpret_cast<struct audio_stream_in *>(stream)); +} + +// Get a pointer to submix_audio_device given a pointer to an audio_device that is embedded within +// the structure. +static struct submix_audio_device * audio_hw_device_get_submix_audio_device( + struct audio_hw_device *device) +{ + ALOG_ASSERT(device); + return reinterpret_cast<struct submix_audio_device *>(reinterpret_cast<uint8_t *>(device) - + offsetof(struct submix_audio_device, device)); +} + +// Get the number of channels referenced by the specified channel_mask. The channel_mask can +// reference either input or output channels. 
+uint32_t get_channel_count_from_mask(const audio_channel_mask_t channel_mask) { + if (audio_is_input_channel(channel_mask)) { + return popcount(channel_mask & AUDIO_CHANNEL_IN_ALL); + } else if (audio_is_output_channel(channel_mask)) { + return popcount(channel_mask & AUDIO_CHANNEL_OUT_ALL); + } + ALOGE("get_channel_count(): No channels specified in channel mask %x", channel_mask); + return 0; +} + +// Compare an audio_config with input channel mask and an audio_config with output channel mask +// returning false if they do *not* match, true otherwise. +static bool audio_config_compare(const audio_config * const input_config, + const audio_config * const output_config) +{ +#if !ENABLE_CHANNEL_CONVERSION + const uint32_t input_channels = get_channel_count_from_mask(input_config->channel_mask); + const uint32_t output_channels = get_channel_count_from_mask(output_config->channel_mask); + if (input_channels != output_channels) { + ALOGE("audio_config_compare() channel count mismatch input=%d vs. output=%d", + input_channels, output_channels); + return false; + } +#endif // !ENABLE_CHANNEL_CONVERSION +#if ENABLE_RESAMPLING + if (input_config->sample_rate != output_config->sample_rate && + get_channel_count_from_mask(input_config->channel_mask) != 1) { +#else + if (input_config->sample_rate != output_config->sample_rate) { +#endif // ENABLE_RESAMPLING + ALOGE("audio_config_compare() sample rate mismatch %ul vs. %ul", + input_config->sample_rate, output_config->sample_rate); + return false; + } + if (input_config->format != output_config->format) { + ALOGE("audio_config_compare() format mismatch %x vs. %x", + input_config->format, output_config->format); + return false; + } + // This purposely ignores offload_info as it's not required for the submix device. + return true; +} + +// If one doesn't exist, create a pipe for the submix audio device rsxadev of size +// buffer_size_frames and optionally associate "in" or "out" with the submix audio device. 
+static void submix_audio_device_create_pipe(struct submix_audio_device * const rsxadev, + const struct audio_config * const config, + const size_t buffer_size_frames, + const uint32_t buffer_period_count, + struct submix_stream_in * const in, + struct submix_stream_out * const out) +{ + ALOG_ASSERT(in || out); + ALOGV("submix_audio_device_create_pipe()"); + pthread_mutex_lock(&rsxadev->lock); + // Save a reference to the specified input or output stream and the associated channel + // mask. + if (in) { + rsxadev->input = in; + rsxadev->config.input_channel_mask = config->channel_mask; +#if ENABLE_RESAMPLING + rsxadev->config.input_sample_rate = config->sample_rate; + // If the output isn't configured yet, set the output sample rate to the maximum supported + // sample rate such that the smallest possible input buffer is created. + if (!rsxadev->output) { + rsxadev->config.output_sample_rate = 48000; + } +#endif // ENABLE_RESAMPLING + } + if (out) { + rsxadev->output = out; + rsxadev->config.output_channel_mask = config->channel_mask; +#if ENABLE_RESAMPLING + rsxadev->config.output_sample_rate = config->sample_rate; +#endif // ENABLE_RESAMPLING + } + // If a pipe isn't associated with the device, create one. + if (rsxadev->rsxSink == NULL || rsxadev->rsxSource == NULL) { + struct submix_config * const device_config = &rsxadev->config; + const NBAIO_Format format = Format_from_SR_C(config->sample_rate, + get_channel_count_from_mask(config->channel_mask), config->format); + const NBAIO_Format offers[1] = {format}; + size_t numCounterOffers = 0; + // Create a MonoPipe with optional blocking set to true. + MonoPipe* sink = new MonoPipe(buffer_size_frames, format, true /*writeCanBlock*/); + // Negotiation between the source and sink cannot fail as the device open operation + // creates both ends of the pipe using the same audio format. 
+ ssize_t index = sink->negotiate(offers, 1, NULL, numCounterOffers); + ALOG_ASSERT(index == 0); + MonoPipeReader* source = new MonoPipeReader(sink); + numCounterOffers = 0; + index = source->negotiate(offers, 1, NULL, numCounterOffers); + ALOG_ASSERT(index == 0); + ALOGV("submix_audio_device_create_pipe(): created pipe"); + + // Save references to the source and sink. + ALOG_ASSERT(rsxadev->rsxSink == NULL); + ALOG_ASSERT(rsxadev->rsxSource == NULL); + rsxadev->rsxSink = sink; + rsxadev->rsxSource = source; + // Store the sanitized audio format in the device so that it's possible to determine + // the format of the pipe source when opening the input device. + memcpy(&device_config->common, config, sizeof(device_config->common)); + device_config->buffer_size_frames = sink->maxFrames(); + device_config->buffer_period_size_frames = device_config->buffer_size_frames / + buffer_period_count; + if (in) device_config->pipe_frame_size = audio_stream_frame_size(&in->stream.common); + if (out) device_config->pipe_frame_size = audio_stream_frame_size(&out->stream.common); + SUBMIX_ALOGV("submix_audio_device_create_pipe(): pipe frame size %zd, pipe size %zd, " + "period size %zd", device_config->pipe_frame_size, + device_config->buffer_size_frames, device_config->buffer_period_size_frames); + } + pthread_mutex_unlock(&rsxadev->lock); +} + +// Release references to the sink and source. Input and output threads may maintain references +// to these objects via StrongPointer (sp<MonoPipe> and sp<MonoPipeReader>) which they can use +// before they shutdown. +static void submix_audio_device_release_pipe(struct submix_audio_device * const rsxadev) +{ + ALOGV("submix_audio_device_release_pipe()"); + rsxadev->rsxSink.clear(); + rsxadev->rsxSource.clear(); +} + +// Remove references to the specified input and output streams. When the device no longer +// references input and output streams destroy the associated pipe. 
+static void submix_audio_device_destroy_pipe(struct submix_audio_device * const rsxadev, + const struct submix_stream_in * const in, + const struct submix_stream_out * const out) +{ + MonoPipe* sink; + pthread_mutex_lock(&rsxadev->lock); + ALOGV("submix_audio_device_destroy_pipe()"); + ALOG_ASSERT(in == NULL || rsxadev->input == in); + ALOG_ASSERT(out == NULL || rsxadev->output == out); + if (in != NULL) { +#if ENABLE_LEGACY_INPUT_OPEN + const_cast<struct submix_stream_in*>(in)->ref_count--; + if (in->ref_count == 0) { + rsxadev->input = NULL; + } + ALOGV("submix_audio_device_destroy_pipe(): input ref_count %d", in->ref_count); +#else + rsxadev->input = NULL; +#endif // ENABLE_LEGACY_INPUT_OPEN + } + if (out != NULL) rsxadev->output = NULL; + // Only tear down the pipe once *both* endpoints have been detached. + if (rsxadev->input == NULL && rsxadev->output == NULL) { + submix_audio_device_release_pipe(rsxadev); + ALOGV("submix_audio_device_destroy_pipe(): pipe destroyed"); + } + pthread_mutex_unlock(&rsxadev->lock); +} + +// Sanitize the user specified audio config for a submix input / output stream. +static void submix_sanitize_config(struct audio_config * const config, const bool is_input_format) +{ + config->channel_mask = is_input_format ? get_supported_channel_in_mask(config->channel_mask) : + get_supported_channel_out_mask(config->channel_mask); + config->sample_rate = get_supported_sample_rate(config->sample_rate); + config->format = DEFAULT_FORMAT; +} + +// Verify a submix input or output stream can be opened. +static bool submix_open_validate(const struct submix_audio_device * const rsxadev, + pthread_mutex_t * const lock, + const struct audio_config * const config, + const bool opening_input) +{ + bool input_open; + bool output_open; + audio_config pipe_config; + + // Query the device for the current audio config and whether input and output streams are open.
+ pthread_mutex_lock(lock); + output_open = rsxadev->output != NULL; + input_open = rsxadev->input != NULL; + memcpy(&pipe_config, &rsxadev->config.common, sizeof(pipe_config)); + pthread_mutex_unlock(lock); + + // If the stream is already open, don't open it again. + if (opening_input ? !ENABLE_LEGACY_INPUT_OPEN && input_open : output_open) { + ALOGE("submix_open_validate(): %s stream already open.", opening_input ? "Input" : + "Output"); + return false; + } + + SUBMIX_ALOGV("submix_open_validate(): sample rate=%d format=%x " + "%s_channel_mask=%x", config->sample_rate, config->format, + opening_input ? "in" : "out", config->channel_mask); + + // If either stream is open, verify the existing audio config the pipe matches the user + // specified config. + if (input_open || output_open) { + const audio_config * const input_config = opening_input ? config : &pipe_config; + const audio_config * const output_config = opening_input ? &pipe_config : config; + // Get the channel mask of the open device. + pipe_config.channel_mask = + opening_input ? rsxadev->config.output_channel_mask : + rsxadev->config.input_channel_mask; + if (!audio_config_compare(input_config, output_config)) { + ALOGE("submix_open_validate(): Unsupported format."); + return false; + } + } + return true; +} + +// Calculate the maximum size of the pipe buffer in frames for the specified stream. 
+static size_t calculate_stream_pipe_size_in_frames(const struct audio_stream *stream, + const struct submix_config *config, + const size_t pipe_frames) +{ + const size_t stream_frame_size = audio_stream_frame_size(stream); + const size_t pipe_frame_size = config->pipe_frame_size; + const size_t max_frame_size = max(stream_frame_size, pipe_frame_size); + return (pipe_frames * config->pipe_frame_size) / max_frame_size; +} /* audio HAL functions */ static uint32_t out_get_sample_rate(const struct audio_stream *stream) { - const struct submix_stream_out *out = - reinterpret_cast<const struct submix_stream_out *>(stream); - uint32_t out_rate = out->dev->config.rate; - //ALOGV("out_get_sample_rate() returns %u", out_rate); + const struct submix_stream_out * const out = audio_stream_get_submix_stream_out( + const_cast<struct audio_stream *>(stream)); +#if ENABLE_RESAMPLING + const uint32_t out_rate = out->dev->config.output_sample_rate; +#else + const uint32_t out_rate = out->dev->config.common.sample_rate; +#endif // ENABLE_RESAMPLING + SUBMIX_ALOGV("out_get_sample_rate() returns %u", out_rate); return out_rate; } static int out_set_sample_rate(struct audio_stream *stream, uint32_t rate) { - if ((rate != 44100) && (rate != 48000)) { + struct submix_stream_out * const out = audio_stream_get_submix_stream_out(stream); +#if ENABLE_RESAMPLING + // The sample rate of the stream can't be changed once it's set since this would change the + // output buffer size and hence break playback to the shared pipe. 
+ if (rate != out->dev->config.output_sample_rate) { + // Three %u conversions require three arguments: requested rate, current rate, requested rate. + ALOGE("out_set_sample_rate(rate=%u) resampling enabled can't change sample rate from " + "%u to %u", rate, out->dev->config.output_sample_rate, rate); + return -ENOSYS; + } +#endif // ENABLE_RESAMPLING + if (!sample_rate_supported(rate)) { ALOGE("out_set_sample_rate(rate=%u) rate unsupported", rate); return -ENOSYS; } - struct submix_stream_out *out = reinterpret_cast<struct submix_stream_out *>(stream); - //ALOGV("out_set_sample_rate(rate=%u)", rate); - out->dev->config.rate = rate; + SUBMIX_ALOGV("out_set_sample_rate(rate=%u)", rate); + out->dev->config.common.sample_rate = rate; return 0; } static size_t out_get_buffer_size(const struct audio_stream *stream) { - const struct submix_stream_out *out = - reinterpret_cast<const struct submix_stream_out *>(stream); - const struct submix_config& config_out = out->dev->config; - size_t buffer_size = config_out.period_size * popcount(config_out.channel_mask) - * sizeof(int16_t); // only PCM 16bit - //ALOGV("out_get_buffer_size() returns %u, period size=%u", - // buffer_size, config_out.period_size); - return buffer_size; + const struct submix_stream_out * const out = audio_stream_get_submix_stream_out( + const_cast<struct audio_stream *>(stream)); + const struct submix_config * const config = &out->dev->config; + const size_t buffer_size_frames = calculate_stream_pipe_size_in_frames( + stream, config, config->buffer_period_size_frames); + const size_t buffer_size_bytes = buffer_size_frames * audio_stream_frame_size(stream); + SUBMIX_ALOGV("out_get_buffer_size() returns %zu bytes, %zu frames", + buffer_size_bytes, buffer_size_frames); + return buffer_size_bytes; } static audio_channel_mask_t out_get_channels(const struct audio_stream *stream) { - const struct submix_stream_out *out = - reinterpret_cast<const struct submix_stream_out *>(stream); - uint32_t channels = out->dev->config.channel_mask; - //ALOGV("out_get_channels() returns %08x", channels); - return channels; +
const struct submix_stream_out * const out = audio_stream_get_submix_stream_out( + const_cast<struct audio_stream *>(stream)); + uint32_t channel_mask = out->dev->config.output_channel_mask; + SUBMIX_ALOGV("out_get_channels() returns %08x", channel_mask); + return channel_mask; } static audio_format_t out_get_format(const struct audio_stream *stream) { - return AUDIO_FORMAT_PCM_16_BIT; + const struct submix_stream_out * const out = audio_stream_get_submix_stream_out( + const_cast<struct audio_stream *>(stream)); + const audio_format_t format = out->dev->config.common.format; + SUBMIX_ALOGV("out_get_format() returns %x", format); + return format; } static int out_set_format(struct audio_stream *stream, audio_format_t format) { - if (format != AUDIO_FORMAT_PCM_16_BIT) { + const struct submix_stream_out * const out = audio_stream_get_submix_stream_out(stream); + if (format != out->dev->config.common.format) { + ALOGE("out_set_format(format=%x) format unsupported", format); return -ENOSYS; - } else { - return 0; } + SUBMIX_ALOGV("out_set_format(format=%x)", format); + return 0; } static int out_standby(struct audio_stream *stream) { + struct submix_audio_device * const rsxadev = audio_stream_get_submix_stream_out(stream)->dev; ALOGI("out_standby()"); - const struct submix_stream_out *out = reinterpret_cast<const struct submix_stream_out *>(stream); - - pthread_mutex_lock(&out->dev->lock); + pthread_mutex_lock(&rsxadev->lock); - out->dev->output_standby = true; + rsxadev->output_standby = true; - pthread_mutex_unlock(&out->dev->lock); + pthread_mutex_unlock(&rsxadev->lock); return 0; } static int out_dump(const struct audio_stream *stream, int fd) { + (void)stream; + (void)fd; return 0; } @@ -178,94 +632,130 @@ static int out_set_parameters(struct audio_stream *stream, const char *kvpairs) { int exiting = -1; AudioParameter parms = AudioParameter(String8(kvpairs)); + SUBMIX_ALOGV("out_set_parameters() kvpairs='%s'", kvpairs); + // FIXME this is using hard-coded strings 
but in the future, this functionality will be // converted to use audio HAL extensions required to support tunneling if ((parms.getInt(String8("exiting"), exiting) == NO_ERROR) && (exiting > 0)) { - const struct submix_stream_out *out = - reinterpret_cast<const struct submix_stream_out *>(stream); - - pthread_mutex_lock(&out->dev->lock); - + struct submix_audio_device * const rsxadev = + audio_stream_get_submix_stream_out(stream)->dev; + pthread_mutex_lock(&rsxadev->lock); { // using the sink - sp<MonoPipe> sink = out->dev->rsxSink.get(); - if (sink == 0) { - pthread_mutex_unlock(&out->dev->lock); + sp<MonoPipe> sink = rsxadev->rsxSink; + if (sink == NULL) { + pthread_mutex_unlock(&rsxadev->lock); return 0; } - ALOGI("shutdown"); + ALOGI("out_set_parameters(): shutdown"); sink->shutdown(true); } // done using the sink - - pthread_mutex_unlock(&out->dev->lock); + pthread_mutex_unlock(&rsxadev->lock); } - return 0; } static char * out_get_parameters(const struct audio_stream *stream, const char *keys) { + (void)stream; + (void)keys; return strdup(""); } static uint32_t out_get_latency(const struct audio_stream_out *stream) { - const struct submix_stream_out *out = - reinterpret_cast<const struct submix_stream_out *>(stream); - const struct submix_config * config_out = &(out->dev->config); - uint32_t latency = (MAX_PIPE_DEPTH_IN_FRAMES * 1000) / config_out->rate; - ALOGV("out_get_latency() returns %u", latency); - return latency; + const struct submix_stream_out * const out = audio_stream_out_get_submix_stream_out( + const_cast<struct audio_stream_out *>(stream)); + const struct submix_config * const config = &out->dev->config; + const size_t buffer_size_frames = calculate_stream_pipe_size_in_frames( + &stream->common, config, config->buffer_size_frames); +#if ENABLE_RESAMPLING + // Sample rate conversion occurs when data is read from the input so data in the buffer is + // at output_sample_rate Hz. 
+ const uint32_t latency_ms = (buffer_size_frames * 1000) / config->output_sample_rate; +#else + const uint32_t latency_ms = (buffer_size_frames * 1000) / config->common.sample_rate; +#endif // ENABLE_RESAMPLING + SUBMIX_ALOGV("out_get_latency() returns %u ms, size in frames %zu, sample rate %u", + latency_ms, buffer_size_frames, config->common.sample_rate); + return latency_ms; } static int out_set_volume(struct audio_stream_out *stream, float left, float right) { + (void)stream; + (void)left; + (void)right; return -ENOSYS; } static ssize_t out_write(struct audio_stream_out *stream, const void* buffer, size_t bytes) { - //ALOGV("out_write(bytes=%d)", bytes); + SUBMIX_ALOGV("out_write(bytes=%zd)", bytes); ssize_t written_frames = 0; - struct submix_stream_out *out = reinterpret_cast<struct submix_stream_out *>(stream); - const size_t frame_size = audio_stream_frame_size(&stream->common); + struct submix_stream_out * const out = audio_stream_out_get_submix_stream_out(stream); + struct submix_audio_device * const rsxadev = out->dev; const size_t frames = bytes / frame_size; - pthread_mutex_lock(&out->dev->lock); + pthread_mutex_lock(&rsxadev->lock); - out->dev->output_standby = false; + rsxadev->output_standby = false; - sp<MonoPipe> sink = out->dev->rsxSink.get(); - if (sink != 0) { + sp<MonoPipe> sink = rsxadev->rsxSink; + if (sink != NULL) { if (sink->isShutdown()) { sink.clear(); - pthread_mutex_unlock(&out->dev->lock); + pthread_mutex_unlock(&rsxadev->lock); + SUBMIX_ALOGV("out_write(): pipe shutdown, ignoring the write."); // the pipe has already been shutdown, this buffer will be lost but we must // simulate timing so we don't drain the output faster than realtime usleep(frames * 1000000 / out_get_sample_rate(&stream->common)); return bytes; } } else { - pthread_mutex_unlock(&out->dev->lock); + pthread_mutex_unlock(&rsxadev->lock); ALOGE("out_write without a pipe!"); ALOG_ASSERT("out_write without a pipe!"); return 0; } - pthread_mutex_unlock(&out->dev->lock); 
+ // If the write to the sink would block when no input stream is present, flush enough frames + // from the pipe to make space to write the most recent data. + { + const size_t availableToWrite = sink->availableToWrite(); + sp<MonoPipeReader> source = rsxadev->rsxSource; + if (rsxadev->input == NULL && availableToWrite < frames) { + static uint8_t flush_buffer[64]; + const size_t flushBufferSizeFrames = sizeof(flush_buffer) / frame_size; + size_t frames_to_flush_from_source = frames - availableToWrite; + SUBMIX_ALOGV("out_write(): flushing %d frames from the pipe to avoid blocking", + frames_to_flush_from_source); + while (frames_to_flush_from_source) { + const size_t flush_size = min(frames_to_flush_from_source, flushBufferSizeFrames); + frames_to_flush_from_source -= flush_size; + source->read(flush_buffer, flush_size, AudioBufferProvider::kInvalidPTS); + } + } + } + + pthread_mutex_unlock(&rsxadev->lock); written_frames = sink->write(buffer, frames); +#if LOG_STREAMS_TO_FILES + if (out->log_fd >= 0) write(out->log_fd, buffer, written_frames * frame_size); +#endif // LOG_STREAMS_TO_FILES + if (written_frames < 0) { if (written_frames == (ssize_t)NEGOTIATE) { ALOGE("out_write() write to pipe returned NEGOTIATE"); - pthread_mutex_lock(&out->dev->lock); + pthread_mutex_lock(&rsxadev->lock); sink.clear(); - pthread_mutex_unlock(&out->dev->lock); + pthread_mutex_unlock(&rsxadev->lock); written_frames = 0; return 0; @@ -276,132 +766,193 @@ static ssize_t out_write(struct audio_stream_out *stream, const void* buffer, } } - pthread_mutex_lock(&out->dev->lock); + pthread_mutex_lock(&rsxadev->lock); sink.clear(); - pthread_mutex_unlock(&out->dev->lock); + pthread_mutex_unlock(&rsxadev->lock); if (written_frames < 0) { ALOGE("out_write() failed writing to pipe with %zd", written_frames); return 0; - } else { - ALOGV("out_write() wrote %zu bytes)", written_frames * frame_size); - return written_frames * frame_size; } + const ssize_t written_bytes = written_frames * 
frame_size; + SUBMIX_ALOGV("out_write() wrote %zd bytes %zd frames", written_bytes, written_frames); + return written_bytes; } static int out_get_render_position(const struct audio_stream_out *stream, uint32_t *dsp_frames) { + (void)stream; + (void)dsp_frames; return -EINVAL; } static int out_add_audio_effect(const struct audio_stream *stream, effect_handle_t effect) { + (void)stream; + (void)effect; return 0; } static int out_remove_audio_effect(const struct audio_stream *stream, effect_handle_t effect) { + (void)stream; + (void)effect; return 0; } static int out_get_next_write_timestamp(const struct audio_stream_out *stream, int64_t *timestamp) { + (void)stream; + (void)timestamp; return -EINVAL; } /** audio_stream_in implementation **/ static uint32_t in_get_sample_rate(const struct audio_stream *stream) { - const struct submix_stream_in *in = reinterpret_cast<const struct submix_stream_in *>(stream); - //ALOGV("in_get_sample_rate() returns %u", in->dev->config.rate); - return in->dev->config.rate; + const struct submix_stream_in * const in = audio_stream_get_submix_stream_in( + const_cast<struct audio_stream*>(stream)); +#if ENABLE_RESAMPLING + const uint32_t rate = in->dev->config.input_sample_rate; +#else + const uint32_t rate = in->dev->config.common.sample_rate; +#endif // ENABLE_RESAMPLING + SUBMIX_ALOGV("in_get_sample_rate() returns %u", rate); + return rate; } static int in_set_sample_rate(struct audio_stream *stream, uint32_t rate) { - return -ENOSYS; + const struct submix_stream_in * const in = audio_stream_get_submix_stream_in(stream); +#if ENABLE_RESAMPLING + // The sample rate of the stream can't be changed once it's set since this would change the + // input buffer size and hence break recording from the shared pipe. 
+ if (rate != in->dev->config.input_sample_rate) { + ALOGE("in_set_sample_rate(rate=%u) resampling enabled can't change sample rate from " + "%u to %u", in->dev->config.input_sample_rate, rate); + return -ENOSYS; + } +#endif // ENABLE_RESAMPLING + if (!sample_rate_supported(rate)) { + ALOGE("in_set_sample_rate(rate=%u) rate unsupported", rate); + return -ENOSYS; + } + in->dev->config.common.sample_rate = rate; + SUBMIX_ALOGV("in_set_sample_rate() set %u", rate); + return 0; } static size_t in_get_buffer_size(const struct audio_stream *stream) { - const struct submix_stream_in *in = reinterpret_cast<const struct submix_stream_in *>(stream); - ALOGV("in_get_buffer_size() returns %zu", - in->dev->config.period_size * audio_stream_frame_size(stream)); - return in->dev->config.period_size * audio_stream_frame_size(stream); + const struct submix_stream_in * const in = audio_stream_get_submix_stream_in( + const_cast<struct audio_stream*>(stream)); + const struct submix_config * const config = &in->dev->config; + size_t buffer_size_frames = calculate_stream_pipe_size_in_frames( + stream, config, config->buffer_period_size_frames); +#if ENABLE_RESAMPLING + // Scale the size of the buffer based upon the maximum number of frames that could be returned + // given the ratio of output to input sample rate. 
+ buffer_size_frames = (size_t)(((float)buffer_size_frames * + (float)config->input_sample_rate) / + (float)config->output_sample_rate); +#endif // ENABLE_RESAMPLING + const size_t buffer_size_bytes = buffer_size_frames * audio_stream_frame_size(stream); + SUBMIX_ALOGV("in_get_buffer_size() returns %zu bytes, %zu frames", buffer_size_bytes, + buffer_size_frames); + return buffer_size_bytes; } static audio_channel_mask_t in_get_channels(const struct audio_stream *stream) { - return AUDIO_CHANNEL_IN_STEREO; + const struct submix_stream_in * const in = audio_stream_get_submix_stream_in( + const_cast<struct audio_stream*>(stream)); + const audio_channel_mask_t channel_mask = in->dev->config.input_channel_mask; + SUBMIX_ALOGV("in_get_channels() returns %x", channel_mask); + return channel_mask; } static audio_format_t in_get_format(const struct audio_stream *stream) { - return AUDIO_FORMAT_PCM_16_BIT; + const struct submix_stream_in * const in = audio_stream_get_submix_stream_in( + const_cast<struct audio_stream*>(stream)); + const audio_format_t format = in->dev->config.common.format; + SUBMIX_ALOGV("in_get_format() returns %x", format); + return format; } static int in_set_format(struct audio_stream *stream, audio_format_t format) { - if (format != AUDIO_FORMAT_PCM_16_BIT) { + const struct submix_stream_in * const in = audio_stream_get_submix_stream_in(stream); + if (format != in->dev->config.common.format) { + ALOGE("in_set_format(format=%x) format unsupported", format); return -ENOSYS; - } else { - return 0; } + SUBMIX_ALOGV("in_set_format(format=%x)", format); + return 0; } static int in_standby(struct audio_stream *stream) { + struct submix_audio_device * const rsxadev = audio_stream_get_submix_stream_in(stream)->dev; ALOGI("in_standby()"); - const struct submix_stream_in *in = reinterpret_cast<const struct submix_stream_in *>(stream); - pthread_mutex_lock(&in->dev->lock); + pthread_mutex_lock(&rsxadev->lock); - in->dev->input_standby = true; + 
rsxadev->input_standby = true; - pthread_mutex_unlock(&in->dev->lock); + pthread_mutex_unlock(&rsxadev->lock); return 0; } static int in_dump(const struct audio_stream *stream, int fd) { + (void)stream; + (void)fd; return 0; } static int in_set_parameters(struct audio_stream *stream, const char *kvpairs) { + (void)stream; + (void)kvpairs; return 0; } static char * in_get_parameters(const struct audio_stream *stream, const char *keys) { + (void)stream; + (void)keys; return strdup(""); } static int in_set_gain(struct audio_stream_in *stream, float gain) { + (void)stream; + (void)gain; return 0; } static ssize_t in_read(struct audio_stream_in *stream, void* buffer, size_t bytes) { - //ALOGV("in_read bytes=%u", bytes); - ssize_t frames_read = -1977; - struct submix_stream_in *in = reinterpret_cast<struct submix_stream_in *>(stream); + struct submix_stream_in * const in = audio_stream_in_get_submix_stream_in(stream); + struct submix_audio_device * const rsxadev = in->dev; + struct audio_config *format; const size_t frame_size = audio_stream_frame_size(&stream->common); const size_t frames_to_read = bytes / frame_size; - pthread_mutex_lock(&in->dev->lock); + SUBMIX_ALOGV("in_read bytes=%zu", bytes); + pthread_mutex_lock(&rsxadev->lock); const bool output_standby_transition = (in->output_standby != in->dev->output_standby); - in->output_standby = in->dev->output_standby; + in->output_standby = rsxadev->output_standby; - if (in->dev->input_standby || output_standby_transition) { - in->dev->input_standby = false; + if (rsxadev->input_standby || output_standby_transition) { + rsxadev->input_standby = false; // keep track of when we exit input standby (== first read == start "real recording") // or when we start recording silence, and reset projected time int rc = clock_gettime(CLOCK_MONOTONIC, &in->record_start_time); @@ -415,43 +966,156 @@ static ssize_t in_read(struct audio_stream_in *stream, void* buffer, { // about to read from audio source - sp<MonoPipeReader> source = 
in->dev->rsxSource.get(); - if (source == 0) { + sp<MonoPipeReader> source = rsxadev->rsxSource; + if (source == NULL) { ALOGE("no audio pipe yet we're trying to read!"); - pthread_mutex_unlock(&in->dev->lock); - usleep((bytes / frame_size) * 1000000 / in_get_sample_rate(&stream->common)); + pthread_mutex_unlock(&rsxadev->lock); + usleep(frames_to_read * 1000000 / in_get_sample_rate(&stream->common)); memset(buffer, 0, bytes); return bytes; } - pthread_mutex_unlock(&in->dev->lock); + pthread_mutex_unlock(&rsxadev->lock); // read the data from the pipe (it's non blocking) int attempts = 0; char* buff = (char*)buffer; +#if ENABLE_CHANNEL_CONVERSION + // Determine whether channel conversion is required. + const uint32_t input_channels = get_channel_count_from_mask( + rsxadev->config.input_channel_mask); + const uint32_t output_channels = get_channel_count_from_mask( + rsxadev->config.output_channel_mask); + if (input_channels != output_channels) { + SUBMIX_ALOGV("in_read(): %d output channels will be converted to %d " + "input channels", output_channels, input_channels); + // Only support 16-bit PCM channel conversion from mono to stereo or stereo to mono. + ALOG_ASSERT(rsxadev->config.common.format == AUDIO_FORMAT_PCM_16_BIT); + ALOG_ASSERT((input_channels == 1 && output_channels == 2) || + (input_channels == 2 && output_channels == 1)); + } +#endif // ENABLE_CHANNEL_CONVERSION + +#if ENABLE_RESAMPLING + const uint32_t input_sample_rate = in_get_sample_rate(&stream->common); + const uint32_t output_sample_rate = rsxadev->config.output_sample_rate; + const size_t resampler_buffer_size_frames = + sizeof(rsxadev->resampler_buffer) / sizeof(rsxadev->resampler_buffer[0]); + float resampler_ratio = 1.0f; + // Determine whether resampling is required. + if (input_sample_rate != output_sample_rate) { + resampler_ratio = (float)output_sample_rate / (float)input_sample_rate; + // Only support 16-bit PCM mono resampling. 
+ // NOTE: Resampling is performed after the channel conversion step. + ALOG_ASSERT(rsxadev->config.common.format == AUDIO_FORMAT_PCM_16_BIT); + ALOG_ASSERT(get_channel_count_from_mask(rsxadev->config.input_channel_mask) == 1); + } +#endif // ENABLE_RESAMPLING + while ((remaining_frames > 0) && (attempts < MAX_READ_ATTEMPTS)) { - attempts++; - frames_read = source->read(buff, remaining_frames, AudioBufferProvider::kInvalidPTS); + ssize_t frames_read = -1977; + size_t read_frames = remaining_frames; +#if ENABLE_RESAMPLING + char* const saved_buff = buff; + if (resampler_ratio != 1.0f) { + // Calculate the number of frames from the pipe that need to be read to generate + // the data for the input stream read. + const size_t frames_required_for_resampler = (size_t)( + (float)read_frames * (float)resampler_ratio); + read_frames = min(frames_required_for_resampler, resampler_buffer_size_frames); + // Read into the resampler buffer. + buff = (char*)rsxadev->resampler_buffer; + } +#endif // ENABLE_RESAMPLING +#if ENABLE_CHANNEL_CONVERSION + if (output_channels == 1 && input_channels == 2) { + // Need to read half the requested frames since the converted output + // data will take twice the space (mono->stereo). + read_frames /= 2; + } +#endif // ENABLE_CHANNEL_CONVERSION + + SUBMIX_ALOGV("in_read(): frames available to read %zd", source->availableToRead()); + + frames_read = source->read(buff, read_frames, AudioBufferProvider::kInvalidPTS); + + SUBMIX_ALOGV("in_read(): frames read %zd", frames_read); + +#if ENABLE_CHANNEL_CONVERSION + // Perform in-place channel conversion. + // NOTE: In the following "input stream" refers to the data returned by this function + // and "output stream" refers to the data read from the pipe. + if (input_channels != output_channels && frames_read > 0) { + int16_t *data = (int16_t*)buff; + if (output_channels == 2 && input_channels == 1) { + // Offset into the output stream data in samples. 
+ ssize_t output_stream_offset = 0; + for (ssize_t input_stream_frame = 0; input_stream_frame < frames_read; + input_stream_frame++, output_stream_offset += 2) { + // Average the content from both channels. + data[input_stream_frame] = ((int32_t)data[output_stream_offset] + + (int32_t)data[output_stream_offset + 1]) / 2; + } + } else if (output_channels == 1 && input_channels == 2) { + // Offset into the input stream data in samples. + ssize_t input_stream_offset = (frames_read - 1) * 2; + for (ssize_t output_stream_frame = frames_read - 1; output_stream_frame >= 0; + output_stream_frame--, input_stream_offset -= 2) { + const short sample = data[output_stream_frame]; + data[input_stream_offset] = sample; + data[input_stream_offset + 1] = sample; + } + } + } +#endif // ENABLE_CHANNEL_CONVERSION + +#if ENABLE_RESAMPLING + if (resampler_ratio != 1.0f) { + SUBMIX_ALOGV("in_read(): resampling %zd frames", frames_read); + const int16_t * const data = (int16_t*)buff; + int16_t * const resampled_buffer = (int16_t*)saved_buff; + // Resample with *no* filtering - if the data from the ouptut stream was really + // sampled at a different rate this will result in very nasty aliasing. 
+ const float output_stream_frames = (float)frames_read; + size_t input_stream_frame = 0; + for (float output_stream_frame = 0.0f; + output_stream_frame < output_stream_frames && + input_stream_frame < remaining_frames; + output_stream_frame += resampler_ratio, input_stream_frame++) { + resampled_buffer[input_stream_frame] = data[(size_t)output_stream_frame]; + } + ALOG_ASSERT(input_stream_frame <= (ssize_t)resampler_buffer_size_frames); + SUBMIX_ALOGV("in_read(): resampler produced %zd frames", input_stream_frame); + frames_read = input_stream_frame; + buff = saved_buff; + } +#endif // ENABLE_RESAMPLING + if (frames_read > 0) { +#if LOG_STREAMS_TO_FILES + if (in->log_fd >= 0) write(in->log_fd, buff, frames_read * frame_size); +#endif // LOG_STREAMS_TO_FILES + remaining_frames -= frames_read; buff += frames_read * frame_size; - //ALOGV(" in_read (att=%d) got %ld frames, remaining=%u", - // attempts, frames_read, remaining_frames); + SUBMIX_ALOGV(" in_read (att=%d) got %zd frames, remaining=%zu", + attempts, frames_read, remaining_frames); } else { - //ALOGE(" in_read read returned %ld", frames_read); + attempts++; + SUBMIX_ALOGE(" in_read read returned %zd", frames_read); usleep(READ_ATTEMPT_SLEEP_MS * 1000); } } // done using the source - pthread_mutex_lock(&in->dev->lock); + pthread_mutex_lock(&rsxadev->lock); source.clear(); - pthread_mutex_unlock(&in->dev->lock); + pthread_mutex_unlock(&rsxadev->lock); } if (remaining_frames > 0) { - ALOGV(" remaining_frames = %zu", remaining_frames); - memset(((char*)buffer)+ bytes - (remaining_frames * frame_size), 0, - remaining_frames * frame_size); + const size_t remaining_bytes = remaining_frames * frame_size; + SUBMIX_ALOGV(" remaining_frames = %zu", remaining_frames); + memset(((char*)buffer)+ bytes - remaining_bytes, 0, remaining_bytes); } // compute how much we need to sleep after reading the data by comparing the wall clock with @@ -469,17 +1133,17 @@ static ssize_t in_read(struct audio_stream_in *stream, void* 
buffer, record_duration.tv_nsec += 1000000000; } - // read_counter_frames contains the number of frames that have been read since the beginning - // of recording (including this call): it's converted to usec and compared to how long we've - // been recording for, which gives us how long we must wait to sync the projected recording - // time, and the observed recording time + // read_counter_frames contains the number of frames that have been read since the + // beginning of recording (including this call): it's converted to usec and compared to + // how long we've been recording for, which gives us how long we must wait to sync the + // projected recording time, and the observed recording time. long projected_vs_observed_offset_us = ((int64_t)(in->read_counter_frames - (record_duration.tv_sec*sample_rate))) * 1000000 / sample_rate - (record_duration.tv_nsec / 1000); - ALOGV(" record duration %5lds %3ldms, will wait: %7ldus", + SUBMIX_ALOGV(" record duration %5lds %3ldms, will wait: %7ldus", record_duration.tv_sec, record_duration.tv_nsec/1000000, projected_vs_observed_offset_us); if (projected_vs_observed_offset_us > 0) { @@ -487,24 +1151,28 @@ static ssize_t in_read(struct audio_stream_in *stream, void* buffer, } } - - ALOGV("in_read returns %zu", bytes); + SUBMIX_ALOGV("in_read returns %zu", bytes); return bytes; } static uint32_t in_get_input_frames_lost(struct audio_stream_in *stream) { + (void)stream; return 0; } static int in_add_audio_effect(const struct audio_stream *stream, effect_handle_t effect) { + (void)stream; + (void)effect; return 0; } static int in_remove_audio_effect(const struct audio_stream *stream, effect_handle_t effect) { + (void)stream; + (void)effect; return 0; } @@ -515,19 +1183,26 @@ static int adev_open_output_stream(struct audio_hw_device *dev, struct audio_config *config, struct audio_stream_out **stream_out) { + struct submix_audio_device * const rsxadev = audio_hw_device_get_submix_audio_device(dev); 
ALOGV("adev_open_output_stream()"); - struct submix_audio_device *rsxadev = (struct submix_audio_device *)dev; struct submix_stream_out *out; - int ret; + (void)handle; + (void)devices; + (void)flags; - out = (struct submix_stream_out *)calloc(1, sizeof(struct submix_stream_out)); - if (!out) { - ret = -ENOMEM; - goto err_open; + *stream_out = NULL; + + // Make sure it's possible to open the device given the current audio config. + submix_sanitize_config(config, false); + if (!submix_open_validate(rsxadev, &rsxadev->lock, config, false)) { + ALOGE("adev_open_output_stream(): Unable to open output stream."); + return -EINVAL; } - pthread_mutex_lock(&rsxadev->lock); + out = (struct submix_stream_out *)calloc(1, sizeof(struct submix_stream_out)); + if (!out) return -ENOMEM; + // Initialize the function pointer tables (v-tables). out->stream.common.get_sample_rate = out_get_sample_rate; out->stream.common.set_sample_rate = out_set_sample_rate; out->stream.common.get_buffer_size = out_get_buffer_size; @@ -546,127 +1221,137 @@ static int adev_open_output_stream(struct audio_hw_device *dev, out->stream.get_render_position = out_get_render_position; out->stream.get_next_write_timestamp = out_get_next_write_timestamp; - config->channel_mask = AUDIO_CHANNEL_OUT_STEREO; - rsxadev->config.channel_mask = config->channel_mask; - - if ((config->sample_rate != 48000) && (config->sample_rate != 44100)) { - config->sample_rate = DEFAULT_RATE_HZ; + // If the sink has been shutdown, delete the pipe so that it's recreated. + pthread_mutex_lock(&rsxadev->lock); + if (rsxadev->rsxSink != NULL && rsxadev->rsxSink->isShutdown()) { + submix_audio_device_release_pipe(rsxadev); } - rsxadev->config.rate = config->sample_rate; - - config->format = AUDIO_FORMAT_PCM_16_BIT; - rsxadev->config.format = config->format; + pthread_mutex_unlock(&rsxadev->lock); - rsxadev->config.period_size = 1024; - rsxadev->config.period_count = 4; + // Store a pointer to the device from the output stream. 
out->dev = rsxadev; - + // Initialize the pipe. + ALOGV("adev_open_output_stream(): Initializing pipe"); + submix_audio_device_create_pipe(rsxadev, config, DEFAULT_PIPE_SIZE_IN_FRAMES, + DEFAULT_PIPE_PERIOD_COUNT, NULL, out); +#if LOG_STREAMS_TO_FILES + out->log_fd = open(LOG_STREAM_OUT_FILENAME, O_CREAT | O_TRUNC | O_WRONLY, + LOG_STREAM_FILE_PERMISSIONS); + ALOGE_IF(out->log_fd < 0, "adev_open_output_stream(): log file open failed %s", + strerror(errno)); + ALOGV("adev_open_output_stream(): log_fd = %d", out->log_fd); +#endif // LOG_STREAMS_TO_FILES + // Return the output stream. *stream_out = &out->stream; - // initialize pipe - { - ALOGV(" initializing pipe"); - const NBAIO_Format format = Format_from_SR_C(config->sample_rate, 2); - const NBAIO_Format offers[1] = {format}; - size_t numCounterOffers = 0; - // creating a MonoPipe with optional blocking set to true. - MonoPipe* sink = new MonoPipe(MAX_PIPE_DEPTH_IN_FRAMES, format, true/*writeCanBlock*/); - ssize_t index = sink->negotiate(offers, 1, NULL, numCounterOffers); - ALOG_ASSERT(index == 0); - MonoPipeReader* source = new MonoPipeReader(sink); - numCounterOffers = 0; - index = source->negotiate(offers, 1, NULL, numCounterOffers); - ALOG_ASSERT(index == 0); - rsxadev->rsxSink = sink; - rsxadev->rsxSource = source; - } - - pthread_mutex_unlock(&rsxadev->lock); - return 0; - -err_open: - *stream_out = NULL; - return ret; } static void adev_close_output_stream(struct audio_hw_device *dev, struct audio_stream_out *stream) { + struct submix_stream_out * const out = audio_stream_out_get_submix_stream_out(stream); ALOGV("adev_close_output_stream()"); - struct submix_audio_device *rsxadev = (struct submix_audio_device *)dev; - - pthread_mutex_lock(&rsxadev->lock); - - rsxadev->rsxSink.clear(); - rsxadev->rsxSource.clear(); - free(stream); - - pthread_mutex_unlock(&rsxadev->lock); + submix_audio_device_destroy_pipe(audio_hw_device_get_submix_audio_device(dev), NULL, out); +#if LOG_STREAMS_TO_FILES + if (out->log_fd 
>= 0) close(out->log_fd); +#endif // LOG_STREAMS_TO_FILES + free(out); } static int adev_set_parameters(struct audio_hw_device *dev, const char *kvpairs) { + (void)dev; + (void)kvpairs; return -ENOSYS; } static char * adev_get_parameters(const struct audio_hw_device *dev, const char *keys) { + (void)dev; + (void)keys; return strdup("");; } static int adev_init_check(const struct audio_hw_device *dev) { ALOGI("adev_init_check()"); + (void)dev; return 0; } static int adev_set_voice_volume(struct audio_hw_device *dev, float volume) { + (void)dev; + (void)volume; return -ENOSYS; } static int adev_set_master_volume(struct audio_hw_device *dev, float volume) { + (void)dev; + (void)volume; return -ENOSYS; } static int adev_get_master_volume(struct audio_hw_device *dev, float *volume) { + (void)dev; + (void)volume; return -ENOSYS; } static int adev_set_master_mute(struct audio_hw_device *dev, bool muted) { + (void)dev; + (void)muted; return -ENOSYS; } static int adev_get_master_mute(struct audio_hw_device *dev, bool *muted) { + (void)dev; + (void)muted; return -ENOSYS; } static int adev_set_mode(struct audio_hw_device *dev, audio_mode_t mode) { + (void)dev; + (void)mode; return 0; } static int adev_set_mic_mute(struct audio_hw_device *dev, bool state) { + (void)dev; + (void)state; return -ENOSYS; } static int adev_get_mic_mute(const struct audio_hw_device *dev, bool *state) { + (void)dev; + (void)state; return -ENOSYS; } static size_t adev_get_input_buffer_size(const struct audio_hw_device *dev, const struct audio_config *config) { - //### TODO correlate this with pipe parameters - return 4096; + if (audio_is_linear_pcm(config->format)) { + const size_t buffer_period_size_frames = + audio_hw_device_get_submix_audio_device(const_cast<struct audio_hw_device*>(dev))-> + config.buffer_period_size_frames; + const size_t frame_size_in_bytes = get_channel_count_from_mask(config->channel_mask) * + audio_bytes_per_sample(config->format); + const size_t buffer_size = 
buffer_period_size_frames * frame_size_in_bytes; + SUBMIX_ALOGV("out_get_buffer_size() returns %zu bytes, %zu frames", + buffer_size, buffer_period_size_frames); + return buffer_size; + } + return 0; } static int adev_open_input_stream(struct audio_hw_device *dev, @@ -675,87 +1360,99 @@ static int adev_open_input_stream(struct audio_hw_device *dev, struct audio_config *config, struct audio_stream_in **stream_in) { + struct submix_audio_device *rsxadev = audio_hw_device_get_submix_audio_device(dev); + struct submix_stream_in *in; ALOGI("adev_open_input_stream()"); + (void)handle; + (void)devices; - struct submix_audio_device *rsxadev = (struct submix_audio_device *)dev; - struct submix_stream_in *in; - int ret; + *stream_in = NULL; - in = (struct submix_stream_in *)calloc(1, sizeof(struct submix_stream_in)); - if (!in) { - ret = -ENOMEM; - goto err_open; + // Make sure it's possible to open the device given the current audio config. + submix_sanitize_config(config, true); + if (!submix_open_validate(rsxadev, &rsxadev->lock, config, true)) { + ALOGE("adev_open_input_stream(): Unable to open input stream."); + return -EINVAL; } +#if ENABLE_LEGACY_INPUT_OPEN pthread_mutex_lock(&rsxadev->lock); - - in->stream.common.get_sample_rate = in_get_sample_rate; - in->stream.common.set_sample_rate = in_set_sample_rate; - in->stream.common.get_buffer_size = in_get_buffer_size; - in->stream.common.get_channels = in_get_channels; - in->stream.common.get_format = in_get_format; - in->stream.common.set_format = in_set_format; - in->stream.common.standby = in_standby; - in->stream.common.dump = in_dump; - in->stream.common.set_parameters = in_set_parameters; - in->stream.common.get_parameters = in_get_parameters; - in->stream.common.add_audio_effect = in_add_audio_effect; - in->stream.common.remove_audio_effect = in_remove_audio_effect; - in->stream.set_gain = in_set_gain; - in->stream.read = in_read; - in->stream.get_input_frames_lost = in_get_input_frames_lost; - - 
config->channel_mask = AUDIO_CHANNEL_IN_STEREO; - rsxadev->config.channel_mask = config->channel_mask; - - if ((config->sample_rate != 48000) && (config->sample_rate != 44100)) { - config->sample_rate = DEFAULT_RATE_HZ; + in = rsxadev->input; + if (in) { + in->ref_count++; + sp<MonoPipe> sink = rsxadev->rsxSink; + ALOG_ASSERT(sink != NULL); + // If the sink has been shutdown, delete the pipe. + if (sink->isShutdown()) submix_audio_device_release_pipe(rsxadev); } - rsxadev->config.rate = config->sample_rate; - - config->format = AUDIO_FORMAT_PCM_16_BIT; - rsxadev->config.format = config->format; - - rsxadev->config.period_size = 1024; - rsxadev->config.period_count = 4; - - *stream_in = &in->stream; + pthread_mutex_unlock(&rsxadev->lock); +#else + in = NULL; +#endif // ENABLE_LEGACY_INPUT_OPEN - in->dev = rsxadev; + if (!in) { + in = (struct submix_stream_in *)calloc(1, sizeof(struct submix_stream_in)); + if (!in) return -ENOMEM; + in->ref_count = 1; + + // Initialize the function pointer tables (v-tables). + in->stream.common.get_sample_rate = in_get_sample_rate; + in->stream.common.set_sample_rate = in_set_sample_rate; + in->stream.common.get_buffer_size = in_get_buffer_size; + in->stream.common.get_channels = in_get_channels; + in->stream.common.get_format = in_get_format; + in->stream.common.set_format = in_set_format; + in->stream.common.standby = in_standby; + in->stream.common.dump = in_dump; + in->stream.common.set_parameters = in_set_parameters; + in->stream.common.get_parameters = in_get_parameters; + in->stream.common.add_audio_effect = in_add_audio_effect; + in->stream.common.remove_audio_effect = in_remove_audio_effect; + in->stream.set_gain = in_set_gain; + in->stream.read = in_read; + in->stream.get_input_frames_lost = in_get_input_frames_lost; + } + // Initialize the input stream. in->read_counter_frames = 0; in->output_standby = rsxadev->output_standby; - - pthread_mutex_unlock(&rsxadev->lock); + in->dev = rsxadev; + // Initialize the pipe. 
+ submix_audio_device_create_pipe(rsxadev, config, DEFAULT_PIPE_SIZE_IN_FRAMES, + DEFAULT_PIPE_PERIOD_COUNT, in, NULL); +#if LOG_STREAMS_TO_FILES + in->log_fd = open(LOG_STREAM_IN_FILENAME, O_CREAT | O_TRUNC | O_WRONLY, + LOG_STREAM_FILE_PERMISSIONS); + ALOGE_IF(in->log_fd < 0, "adev_open_input_stream(): log file open failed %s", + strerror(errno)); + ALOGV("adev_open_input_stream(): log_fd = %d", in->log_fd); +#endif // LOG_STREAMS_TO_FILES + // Return the input stream. + *stream_in = &in->stream; return 0; - -err_open: - *stream_in = NULL; - return ret; } static void adev_close_input_stream(struct audio_hw_device *dev, - struct audio_stream_in *stream) + struct audio_stream_in *stream) { + struct submix_stream_in * const in = audio_stream_in_get_submix_stream_in(stream); ALOGV("adev_close_input_stream()"); - struct submix_audio_device *rsxadev = (struct submix_audio_device *)dev; - - pthread_mutex_lock(&rsxadev->lock); - - MonoPipe* sink = rsxadev->rsxSink.get(); - if (sink != NULL) { - ALOGI("shutdown"); - sink->shutdown(true); - } - - free(stream); - - pthread_mutex_unlock(&rsxadev->lock); + submix_audio_device_destroy_pipe(audio_hw_device_get_submix_audio_device(dev), in, NULL); +#if LOG_STREAMS_TO_FILES + if (in->log_fd >= 0) close(in->log_fd); +#endif // LOG_STREAMS_TO_FILES +#if ENABLE_LEGACY_INPUT_OPEN + if (in->ref_count == 0) free(in); +#else + free(in); +#endif // ENABLE_LEGACY_INPUT_OPEN } static int adev_dump(const audio_hw_device_t *device, int fd) { + (void)device; + (void)fd; return 0; } diff --git a/modules/camera/Android.mk b/modules/camera/Android.mk index fbe44c5..ae68ed5 100644 --- a/modules/camera/Android.mk +++ b/modules/camera/Android.mk @@ -26,14 +26,17 @@ LOCAL_C_INCLUDES += \ LOCAL_SRC_FILES := \ CameraHAL.cpp \ Camera.cpp \ + ExampleCamera.cpp \ Metadata.cpp \ Stream.cpp \ + VendorTags.cpp \ LOCAL_SHARED_LIBRARIES := \ libcamera_metadata \ libcutils \ liblog \ libsync \ + libutils \ LOCAL_CFLAGS += -Wall -Wextra -fvisibility=hidden diff 
--git a/modules/camera/Camera.cpp b/modules/camera/Camera.cpp index 973380e..de3ae78 100644 --- a/modules/camera/Camera.cpp +++ b/modules/camera/Camera.cpp @@ -15,11 +15,12 @@ */ #include <cstdlib> -#include <pthread.h> +#include <stdio.h> #include <hardware/camera3.h> #include <sync/sync.h> #include <system/camera_metadata.h> #include <system/graphics.h> +#include <utils/Mutex.h> #include "CameraHAL.h" #include "Metadata.h" #include "Stream.h" @@ -29,15 +30,12 @@ #include <cutils/log.h> #define ATRACE_TAG (ATRACE_TAG_CAMERA | ATRACE_TAG_HAL) -#include <cutils/trace.h> -#include "ScopedTrace.h" +#include <utils/Trace.h> #include "Camera.h" #define CAMERA_SYNC_TIMEOUT 5000 // in msecs -#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0])) - namespace default_camera_hal { extern "C" { @@ -59,9 +57,7 @@ Camera::Camera(int id) mNumStreams(0), mSettings(NULL) { - pthread_mutex_init(&mMutex, NULL); - pthread_mutex_init(&mStaticInfoMutex, NULL); - + memset(&mTemplates, 0, sizeof(mTemplates)); memset(&mDevice, 0, sizeof(mDevice)); mDevice.common.tag = HARDWARE_DEVICE_TAG; mDevice.common.version = CAMERA_DEVICE_API_VERSION_3_0; @@ -72,17 +68,18 @@ Camera::Camera(int id) Camera::~Camera() { - pthread_mutex_destroy(&mMutex); - pthread_mutex_destroy(&mStaticInfoMutex); + if (mStaticInfo != NULL) { + free_camera_metadata(mStaticInfo); + } } int Camera::open(const hw_module_t *module, hw_device_t **device) { ALOGI("%s:%d: Opening camera device", __func__, mId); - CAMTRACE_CALL(); - pthread_mutex_lock(&mMutex); + ATRACE_CALL(); + android::Mutex::Autolock al(mDeviceLock); + if (mBusy) { - pthread_mutex_unlock(&mMutex); ALOGE("%s:%d: Error! 
Camera device already opened", __func__, mId); return -EBUSY; } @@ -91,217 +88,62 @@ int Camera::open(const hw_module_t *module, hw_device_t **device) mBusy = true; mDevice.common.module = const_cast<hw_module_t*>(module); *device = &mDevice.common; - - pthread_mutex_unlock(&mMutex); return 0; } int Camera::getInfo(struct camera_info *info) { + android::Mutex::Autolock al(mStaticInfoLock); + info->facing = CAMERA_FACING_FRONT; info->orientation = 0; info->device_version = mDevice.common.version; - - pthread_mutex_lock(&mStaticInfoMutex); if (mStaticInfo == NULL) { mStaticInfo = initStaticInfo(); } - pthread_mutex_unlock(&mStaticInfoMutex); - info->static_camera_characteristics = mStaticInfo; - return 0; } int Camera::close() { ALOGI("%s:%d: Closing camera device", __func__, mId); - CAMTRACE_CALL(); - pthread_mutex_lock(&mMutex); + ATRACE_CALL(); + android::Mutex::Autolock al(mDeviceLock); + if (!mBusy) { - pthread_mutex_unlock(&mMutex); ALOGE("%s:%d: Error! Camera device not open", __func__, mId); return -EINVAL; } // TODO: close camera dev nodes, etc mBusy = false; - - pthread_mutex_unlock(&mMutex); return 0; } int Camera::initialize(const camera3_callback_ops_t *callback_ops) { + int res; + ALOGV("%s:%d: callback_ops=%p", __func__, mId, callback_ops); mCallbackOps = callback_ops; - // Create standard settings templates - // 0 is invalid as template - mTemplates[0] = NULL; - // CAMERA3_TEMPLATE_PREVIEW = 1 - mTemplates[1] = new Metadata(ANDROID_CONTROL_MODE_OFF, - ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW); - // CAMERA3_TEMPLATE_STILL_CAPTURE = 2 - mTemplates[2] = new Metadata(ANDROID_CONTROL_MODE_OFF, - ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE); - // CAMERA3_TEMPLATE_VIDEO_RECORD = 3 - mTemplates[3] = new Metadata(ANDROID_CONTROL_MODE_OFF, - ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_RECORD); - // CAMERA3_TEMPLATE_VIDEO_SNAPSHOT = 4 - mTemplates[4] = new Metadata(ANDROID_CONTROL_MODE_OFF, - ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_SNAPSHOT); - // 
CAMERA3_TEMPLATE_STILL_ZERO_SHUTTER_LAG = 5 - mTemplates[5] = new Metadata(ANDROID_CONTROL_MODE_OFF, - ANDROID_CONTROL_CAPTURE_INTENT_ZERO_SHUTTER_LAG); - // Pre-generate metadata structures - for (int i = 1; i < CAMERA3_TEMPLATE_COUNT; i++) { - mTemplates[i]->generate(); - } - // TODO: create vendor templates + // per-device specific initialization + res = initDevice(); + if (res != 0) { + ALOGE("%s:%d: Failed to initialize device!", __func__, mId); + return res; + } return 0; } -camera_metadata_t *Camera::initStaticInfo() -{ - /* - * Setup static camera info. This will have to customized per camera - * device. - */ - Metadata m; - - /* android.control */ - int32_t android_control_ae_available_target_fps_ranges[] = {30, 30}; - m.addInt32(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES, - ARRAY_SIZE(android_control_ae_available_target_fps_ranges), - android_control_ae_available_target_fps_ranges); - - int32_t android_control_ae_compensation_range[] = {-4, 4}; - m.addInt32(ANDROID_CONTROL_AE_COMPENSATION_RANGE, - ARRAY_SIZE(android_control_ae_compensation_range), - android_control_ae_compensation_range); - - camera_metadata_rational_t android_control_ae_compensation_step[] = {{2,1}}; - m.addRational(ANDROID_CONTROL_AE_COMPENSATION_STEP, - ARRAY_SIZE(android_control_ae_compensation_step), - android_control_ae_compensation_step); - - int32_t android_control_max_regions[] = {1}; - m.addInt32(ANDROID_CONTROL_MAX_REGIONS, - ARRAY_SIZE(android_control_max_regions), - android_control_max_regions); - - /* android.jpeg */ - int32_t android_jpeg_available_thumbnail_sizes[] = {0, 0, 128, 96}; - m.addInt32(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES, - ARRAY_SIZE(android_jpeg_available_thumbnail_sizes), - android_jpeg_available_thumbnail_sizes); - - /* android.lens */ - float android_lens_info_available_focal_lengths[] = {1.0}; - m.addFloat(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS, - ARRAY_SIZE(android_lens_info_available_focal_lengths), - android_lens_info_available_focal_lengths); 
- - /* android.request */ - int32_t android_request_max_num_output_streams[] = {0, 3, 1}; - m.addInt32(ANDROID_REQUEST_MAX_NUM_OUTPUT_STREAMS, - ARRAY_SIZE(android_request_max_num_output_streams), - android_request_max_num_output_streams); - - /* android.scaler */ - int32_t android_scaler_available_formats[] = { - HAL_PIXEL_FORMAT_RAW_SENSOR, - HAL_PIXEL_FORMAT_BLOB, - HAL_PIXEL_FORMAT_RGBA_8888, - HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, - // These are handled by YCbCr_420_888 - // HAL_PIXEL_FORMAT_YV12, - // HAL_PIXEL_FORMAT_YCrCb_420_SP, - HAL_PIXEL_FORMAT_YCbCr_420_888}; - m.addInt32(ANDROID_SCALER_AVAILABLE_FORMATS, - ARRAY_SIZE(android_scaler_available_formats), - android_scaler_available_formats); - - int64_t android_scaler_available_jpeg_min_durations[] = {1}; - m.addInt64(ANDROID_SCALER_AVAILABLE_JPEG_MIN_DURATIONS, - ARRAY_SIZE(android_scaler_available_jpeg_min_durations), - android_scaler_available_jpeg_min_durations); - - int32_t android_scaler_available_jpeg_sizes[] = {640, 480}; - m.addInt32(ANDROID_SCALER_AVAILABLE_JPEG_SIZES, - ARRAY_SIZE(android_scaler_available_jpeg_sizes), - android_scaler_available_jpeg_sizes); - - float android_scaler_available_max_digital_zoom[] = {1}; - m.addFloat(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM, - ARRAY_SIZE(android_scaler_available_max_digital_zoom), - android_scaler_available_max_digital_zoom); - - int64_t android_scaler_available_processed_min_durations[] = {1}; - m.addInt64(ANDROID_SCALER_AVAILABLE_PROCESSED_MIN_DURATIONS, - ARRAY_SIZE(android_scaler_available_processed_min_durations), - android_scaler_available_processed_min_durations); - - int32_t android_scaler_available_processed_sizes[] = {640, 480}; - m.addInt32(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES, - ARRAY_SIZE(android_scaler_available_processed_sizes), - android_scaler_available_processed_sizes); - - int64_t android_scaler_available_raw_min_durations[] = {1}; - m.addInt64(ANDROID_SCALER_AVAILABLE_RAW_MIN_DURATIONS, - 
ARRAY_SIZE(android_scaler_available_raw_min_durations), - android_scaler_available_raw_min_durations); - - int32_t android_scaler_available_raw_sizes[] = {640, 480}; - m.addInt32(ANDROID_SCALER_AVAILABLE_RAW_SIZES, - ARRAY_SIZE(android_scaler_available_raw_sizes), - android_scaler_available_raw_sizes); - - /* android.sensor */ - - int32_t android_sensor_info_active_array_size[] = {0, 0, 640, 480}; - m.addInt32(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE, - ARRAY_SIZE(android_sensor_info_active_array_size), - android_sensor_info_active_array_size); - - int32_t android_sensor_info_sensitivity_range[] = - {100, 1600}; - m.addInt32(ANDROID_SENSOR_INFO_SENSITIVITY_RANGE, - ARRAY_SIZE(android_sensor_info_sensitivity_range), - android_sensor_info_sensitivity_range); - - int64_t android_sensor_info_max_frame_duration[] = {30000000000}; - m.addInt64(ANDROID_SENSOR_INFO_MAX_FRAME_DURATION, - ARRAY_SIZE(android_sensor_info_max_frame_duration), - android_sensor_info_max_frame_duration); - - float android_sensor_info_physical_size[] = {3.2, 2.4}; - m.addFloat(ANDROID_SENSOR_INFO_PHYSICAL_SIZE, - ARRAY_SIZE(android_sensor_info_physical_size), - android_sensor_info_physical_size); - - int32_t android_sensor_info_pixel_array_size[] = {640, 480}; - m.addInt32(ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE, - ARRAY_SIZE(android_sensor_info_pixel_array_size), - android_sensor_info_pixel_array_size); - - int32_t android_sensor_orientation[] = {0}; - m.addInt32(ANDROID_SENSOR_ORIENTATION, - ARRAY_SIZE(android_sensor_orientation), - android_sensor_orientation); - - /* End of static camera characteristics */ - - return clone_camera_metadata(m.generate()); -} - int Camera::configureStreams(camera3_stream_configuration_t *stream_config) { camera3_stream_t *astream; Stream **newStreams = NULL; - CAMTRACE_CALL(); ALOGV("%s:%d: stream_config=%p", __func__, mId, stream_config); + ATRACE_CALL(); + android::Mutex::Autolock al(mDeviceLock); if (stream_config == NULL) { ALOGE("%s:%d: NULL stream configuration 
array", __func__, mId); @@ -317,8 +159,6 @@ int Camera::configureStreams(camera3_stream_configuration_t *stream_config) ALOGV("%s:%d: Number of Streams: %d", __func__, mId, stream_config->num_streams); - pthread_mutex_lock(&mMutex); - // Mark all current streams unused for now for (int i = 0; i < mNumStreams; i++) mStreams[i]->mReuse = false; @@ -356,14 +196,11 @@ int Camera::configureStreams(camera3_stream_configuration_t *stream_config) // Clear out last seen settings metadata setSettings(NULL); - - pthread_mutex_unlock(&mMutex); return 0; err_out: // Clean up temporary streams, preserve existing mStreams/mNumStreams destroyStreams(newStreams, stream_config->num_streams); - pthread_mutex_unlock(&mMutex); return -EINVAL; } @@ -469,15 +306,20 @@ int Camera::registerStreamBuffers(const camera3_stream_buffer_set_t *buf_set) return stream->registerBuffers(buf_set); } +bool Camera::isValidTemplateType(int type) +{ + return type < 1 || type >= CAMERA3_TEMPLATE_COUNT; +} + const camera_metadata_t* Camera::constructDefaultRequestSettings(int type) { ALOGV("%s:%d: type=%d", __func__, mId, type); - if (type < 1 || type >= CAMERA3_TEMPLATE_COUNT) { + if (!isValidTemplateType(type)) { ALOGE("%s:%d: Invalid template request type: %d", __func__, mId, type); return NULL; } - return mTemplates[type]->generate(); + return mTemplates[type]; } int Camera::processCaptureRequest(camera3_capture_request_t *request) @@ -485,7 +327,7 @@ int Camera::processCaptureRequest(camera3_capture_request_t *request) camera3_capture_result result; ALOGV("%s:%d: request=%p", __func__, mId, request); - CAMTRACE_CALL(); + ATRACE_CALL(); if (request == NULL) { ALOGE("%s:%d: NULL request recieved", __func__, mId); @@ -565,12 +407,6 @@ void Camera::setSettings(const camera_metadata_t *new_settings) mSettings = clone_camera_metadata(new_settings); } -bool Camera::isValidCaptureSettings(const camera_metadata_t* /*settings*/) -{ - // TODO: reject settings that cannot be captured - return true; -} - bool 
Camera::isValidReprocessSettings(const camera_metadata_t* /*settings*/) { // TODO: reject settings that cannot be reprocessed @@ -631,16 +467,65 @@ void Camera::notifyShutter(uint32_t frame_number, uint64_t timestamp) mCallbackOps->notify(mCallbackOps, &m); } -void Camera::getMetadataVendorTagOps(vendor_tag_query_ops_t *ops) +void Camera::dump(int fd) { - ALOGV("%s:%d: ops=%p", __func__, mId, ops); - // TODO: return vendor tag ops + ALOGV("%s:%d: Dumping to fd %d", __func__, mId, fd); + ATRACE_CALL(); + android::Mutex::Autolock al(mDeviceLock); + + dprintf(fd, "Camera ID: %d (Busy: %d)\n", mId, mBusy); + + // TODO: dump all settings + dprintf(fd, "Most Recent Settings: (%p)\n", mSettings); + + dprintf(fd, "Number of streams: %d\n", mNumStreams); + for (int i = 0; i < mNumStreams; i++) { + dprintf(fd, "Stream %d/%d:\n", i, mNumStreams); + mStreams[i]->dump(fd); + } } -void Camera::dump(int fd) +const char* Camera::templateToString(int type) { - ALOGV("%s:%d: Dumping to fd %d", __func__, mId, fd); - // TODO: dprintf all relevant state to fd + switch (type) { + case CAMERA3_TEMPLATE_PREVIEW: + return "CAMERA3_TEMPLATE_PREVIEW"; + case CAMERA3_TEMPLATE_STILL_CAPTURE: + return "CAMERA3_TEMPLATE_STILL_CAPTURE"; + case CAMERA3_TEMPLATE_VIDEO_RECORD: + return "CAMERA3_TEMPLATE_VIDEO_RECORD"; + case CAMERA3_TEMPLATE_VIDEO_SNAPSHOT: + return "CAMERA3_TEMPLATE_VIDEO_SNAPSHOT"; + case CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG: + return "CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG"; + } + // TODO: support vendor templates + return "Invalid template type!"; +} + +int Camera::setTemplate(int type, camera_metadata_t *settings) +{ + android::Mutex::Autolock al(mDeviceLock); + + if (!isValidTemplateType(type)) { + ALOGE("%s:%d: Invalid template request type: %d", __func__, mId, type); + return -EINVAL; + } + + if (mTemplates[type] != NULL) { + ALOGE("%s:%d: Setting already constructed template type %s(%d)", + __func__, mId, templateToString(type), type); + return -EINVAL; + } + + // Make a durable 
copy of the underlying metadata + mTemplates[type] = clone_camera_metadata(settings); + if (mTemplates[type] == NULL) { + ALOGE("%s:%d: Failed to clone metadata %p for template type %s(%d)", + __func__, mId, settings, templateToString(type), type); + return -EINVAL; + } + return 0; } extern "C" { @@ -680,28 +565,30 @@ static int process_capture_request(const camera3_device_t *dev, return camdev_to_camera(dev)->processCaptureRequest(request); } -static void get_metadata_vendor_tag_ops(const camera3_device_t *dev, - vendor_tag_query_ops_t *ops) +static void dump(const camera3_device_t *dev, int fd) { - camdev_to_camera(dev)->getMetadataVendorTagOps(ops); + camdev_to_camera(dev)->dump(fd); } -static void dump(const camera3_device_t *dev, int fd) +static int flush(const camera3_device_t*) { - camdev_to_camera(dev)->dump(fd); + ALOGE("%s: unimplemented.", __func__); + return -1; } + } // extern "C" const camera3_device_ops_t Camera::sOps = { - .initialize = default_camera_hal::initialize, - .configure_streams = default_camera_hal::configure_streams, + .initialize = default_camera_hal::initialize, + .configure_streams = default_camera_hal::configure_streams, .register_stream_buffers = default_camera_hal::register_stream_buffers, - .construct_default_request_settings = - default_camera_hal::construct_default_request_settings, + .construct_default_request_settings + = default_camera_hal::construct_default_request_settings, .process_capture_request = default_camera_hal::process_capture_request, - .get_metadata_vendor_tag_ops = - default_camera_hal::get_metadata_vendor_tag_ops, - .dump = default_camera_hal::dump + .get_metadata_vendor_tag_ops = NULL, + .dump = default_camera_hal::dump, + .flush = default_camera_hal::flush, + .reserved = {0}, }; } // namespace default_camera_hal diff --git a/modules/camera/Camera.h b/modules/camera/Camera.h index be672f9..0ceaf25 100644 --- a/modules/camera/Camera.h +++ b/modules/camera/Camera.h @@ -17,9 +17,9 @@ #ifndef CAMERA_H_ #define 
CAMERA_H_ -#include <pthread.h> #include <hardware/hardware.h> #include <hardware/camera3.h> +#include <utils/Mutex.h> #include "Metadata.h" #include "Stream.h" @@ -28,12 +28,14 @@ namespace default_camera_hal { // This is constructed when the HAL module is loaded, one per physical camera. // It is opened by the framework, and must be closed before it can be opened // again. +// This is an abstract class, containing all logic and data shared between all +// camera devices (front, back, etc) and common to the ISP. class Camera { public: // id is used to distinguish cameras. 0 <= id < NUM_CAMERAS. // module is a handle to the HAL module, used when the device is opened. Camera(int id); - ~Camera(); + virtual ~Camera(); // Common Camera Device Operations (see <hardware/camera_common.h>) int open(const hw_module_t *module, hw_device_t **device); @@ -46,15 +48,24 @@ class Camera { int registerStreamBuffers(const camera3_stream_buffer_set_t *buf_set); const camera_metadata_t *constructDefaultRequestSettings(int type); int processCaptureRequest(camera3_capture_request_t *request); - void getMetadataVendorTagOps(vendor_tag_query_ops_t *ops); void dump(int fd); - // Camera device handle returned to framework for use - camera3_device_t mDevice; + + protected: + // Initialize static camera characteristics for individual device + virtual camera_metadata_t *initStaticInfo() = 0; + // Verify settings are valid for a capture + virtual bool isValidCaptureSettings(const camera_metadata_t *) = 0; + // Separate initialization method for individual devices when opened + virtual int initDevice() = 0; + // Accessor used by initDevice() to set the templates' metadata + int setTemplate(int type, camera_metadata_t *static_info); + // Prettyprint template names + const char* templateToString(int type); private: - // Separate initialization method for static metadata - camera_metadata_t *initStaticInfo(); + // Camera device handle returned to framework for use + camera3_device_t mDevice; // 
Reuse a stream already created by this device Stream *reuseStream(camera3_stream_t *astream); // Destroy all streams in a stream array, and the array itself @@ -65,8 +76,6 @@ class Camera { void setupStreams(Stream **array, int count); // Copy new settings for re-use and clean up old settings. void setSettings(const camera_metadata_t *new_settings); - // Verify settings are valid for a capture - bool isValidCaptureSettings(const camera_metadata_t *settings); // Verify settings are valid for reprocessing an input buffer bool isValidReprocessSettings(const camera_metadata_t *settings); // Process an output buffer @@ -74,6 +83,8 @@ class Camera { camera3_stream_buffer_t *out); // Send a shutter notify message with start of exposure time void notifyShutter(uint32_t frame_number, uint64_t timestamp); + // Is type a valid template type (and valid index into mTemplates) + bool isValidTemplateType(int type); // Identifier used by framework to distinguish cameras const int mId; @@ -88,16 +99,16 @@ class Camera { // Methods used to call back into the framework const camera3_callback_ops_t *mCallbackOps; // Lock protecting the Camera object for modifications - pthread_mutex_t mMutex; + android::Mutex mDeviceLock; // Lock protecting only static camera characteristics, which may // be accessed without the camera device open - pthread_mutex_t mStaticInfoMutex; + android::Mutex mStaticInfoLock; // Array of handles to streams currently in use by the device Stream **mStreams; // Number of streams in mStreams int mNumStreams; // Static array of standard camera settings templates - Metadata *mTemplates[CAMERA3_TEMPLATE_COUNT]; + camera_metadata_t *mTemplates[CAMERA3_TEMPLATE_COUNT]; // Most recent request settings seen, memoized to be reused camera_metadata_t *mSettings; }; diff --git a/modules/camera/CameraHAL.cpp b/modules/camera/CameraHAL.cpp index dfbbe4c..b04cf0c 100644 --- a/modules/camera/CameraHAL.cpp +++ b/modules/camera/CameraHAL.cpp @@ -17,7 +17,8 @@ #include <cstdlib> 
#include <hardware/camera_common.h> #include <hardware/hardware.h> -#include "Camera.h" +#include "ExampleCamera.h" +#include "VendorTags.h" //#define LOG_NDEBUG 0 #define LOG_TAG "DefaultCameraHAL" @@ -38,25 +39,24 @@ namespace default_camera_hal { // Default Camera HAL has 2 cameras, front and rear. static CameraHAL gCameraHAL(2); +// Handle containing vendor tag functionality +static VendorTags gVendorTags; CameraHAL::CameraHAL(int num_cameras) : mNumberOfCameras(num_cameras), mCallbacks(NULL) { - int i; - // Allocate camera array and instantiate camera devices mCameras = new Camera*[mNumberOfCameras]; - for (i = 0; i < mNumberOfCameras; i++) { - mCameras[i] = new Camera(i); - } + // Rear camera + mCameras[0] = new ExampleCamera(0); + // Front camera + mCameras[1] = new ExampleCamera(1); } CameraHAL::~CameraHAL() { - int i; - - for (i = 0; i < mNumberOfCameras; i++) { + for (int i = 0; i < mNumberOfCameras; i++) { delete mCameras[i]; } delete [] mCameras; @@ -124,6 +124,41 @@ static int set_callbacks(const camera_module_callbacks_t *callbacks) return gCameraHAL.setCallbacks(callbacks); } +static int get_tag_count(const vendor_tag_ops_t* ops) +{ + return gVendorTags.getTagCount(ops); +} + +static void get_all_tags(const vendor_tag_ops_t* ops, uint32_t* tag_array) +{ + gVendorTags.getAllTags(ops, tag_array); +} + +static const char* get_section_name(const vendor_tag_ops_t* ops, uint32_t tag) +{ + return gVendorTags.getSectionName(ops, tag); +} + +static const char* get_tag_name(const vendor_tag_ops_t* ops, uint32_t tag) +{ + return gVendorTags.getTagName(ops, tag); +} + +static int get_tag_type(const vendor_tag_ops_t* ops, uint32_t tag) +{ + return gVendorTags.getTagType(ops, tag); +} + +static void get_vendor_tag_ops(vendor_tag_ops_t* ops) +{ + ALOGV("%s : ops=%p", __func__, ops); + ops->get_tag_count = get_tag_count; + ops->get_all_tags = get_all_tags; + ops->get_section_name = get_section_name; + ops->get_tag_name = get_tag_name; + ops->get_tag_type = 
get_tag_type; +} + static int open_dev(const hw_module_t* mod, const char* name, hw_device_t** dev) { return gCameraHAL.open(mod, name, dev); @@ -136,7 +171,7 @@ static hw_module_methods_t gCameraModuleMethods = { camera_module_t HAL_MODULE_INFO_SYM __attribute__ ((visibility("default"))) = { common : { tag : HARDWARE_MODULE_TAG, - module_api_version : CAMERA_MODULE_API_VERSION_2_0, + module_api_version : CAMERA_MODULE_API_VERSION_2_2, hal_api_version : HARDWARE_HAL_API_VERSION, id : CAMERA_HARDWARE_MODULE_ID, name : "Default Camera HAL", @@ -147,7 +182,9 @@ camera_module_t HAL_MODULE_INFO_SYM __attribute__ ((visibility("default"))) = { }, get_number_of_cameras : get_number_of_cameras, get_camera_info : get_camera_info, - set_callbacks : set_callbacks + set_callbacks : set_callbacks, + get_vendor_tag_ops : get_vendor_tag_ops, + reserved : {0}, }; } // extern "C" diff --git a/modules/camera/CameraHAL.h b/modules/camera/CameraHAL.h index ba0db4e..00c74e5 100644 --- a/modules/camera/CameraHAL.h +++ b/modules/camera/CameraHAL.h @@ -20,7 +20,9 @@ #include <cutils/bitops.h> #include <hardware/hardware.h> #include <hardware/camera_common.h> +#include <system/camera_vendor_tags.h> #include "Camera.h" +#include "VendorTags.h" namespace default_camera_hal { // CameraHAL contains all module state that isn't specific to an individual @@ -34,6 +36,7 @@ class CameraHAL { int getNumberOfCameras(); int getCameraInfo(int camera_id, struct camera_info *info); int setCallbacks(const camera_module_callbacks_t *callbacks); + void getVendorTagOps(vendor_tag_ops_t* ops); // Hardware Module Interface (see <hardware/hardware.h>) int open(const hw_module_t* mod, const char* name, hw_device_t** dev); diff --git a/modules/camera/ExampleCamera.cpp b/modules/camera/ExampleCamera.cpp new file mode 100644 index 0000000..ca28b99 --- /dev/null +++ b/modules/camera/ExampleCamera.cpp @@ -0,0 +1,273 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache 
License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <system/camera_metadata.h> +#include "Camera.h" + +//#define LOG_NDEBUG 0 +#define LOG_TAG "ExampleCamera" +#include <cutils/log.h> + +#define ATRACE_TAG (ATRACE_TAG_CAMERA | ATRACE_TAG_HAL) +#include <utils/Trace.h> + +#include "ExampleCamera.h" + +#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0])) + +namespace default_camera_hal { + +ExampleCamera::ExampleCamera(int id) : Camera(id) +{ +} + +ExampleCamera::~ExampleCamera() +{ +} + +camera_metadata_t *ExampleCamera::initStaticInfo() +{ + /* + * Setup static camera info. This will have to customized per camera + * device. 
+ */ + Metadata m; + + /* android.control */ + int32_t android_control_ae_available_target_fps_ranges[] = {30, 30}; + m.addInt32(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES, + ARRAY_SIZE(android_control_ae_available_target_fps_ranges), + android_control_ae_available_target_fps_ranges); + + int32_t android_control_ae_compensation_range[] = {-4, 4}; + m.addInt32(ANDROID_CONTROL_AE_COMPENSATION_RANGE, + ARRAY_SIZE(android_control_ae_compensation_range), + android_control_ae_compensation_range); + + camera_metadata_rational_t android_control_ae_compensation_step[] = {{2,1}}; + m.addRational(ANDROID_CONTROL_AE_COMPENSATION_STEP, + ARRAY_SIZE(android_control_ae_compensation_step), + android_control_ae_compensation_step); + + int32_t android_control_max_regions[] = {/*AE*/ 1,/*AWB*/ 1,/*AF*/ 1}; + m.addInt32(ANDROID_CONTROL_MAX_REGIONS, + ARRAY_SIZE(android_control_max_regions), + android_control_max_regions); + + /* android.jpeg */ + int32_t android_jpeg_available_thumbnail_sizes[] = {0, 0, 128, 96}; + m.addInt32(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES, + ARRAY_SIZE(android_jpeg_available_thumbnail_sizes), + android_jpeg_available_thumbnail_sizes); + + int32_t android_jpeg_max_size[] = {13 * 1024 * 1024}; // 13MB + m.addInt32(ANDROID_JPEG_MAX_SIZE, + ARRAY_SIZE(android_jpeg_max_size), + android_jpeg_max_size); + + /* android.lens */ + float android_lens_info_available_focal_lengths[] = {1.0}; + m.addFloat(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS, + ARRAY_SIZE(android_lens_info_available_focal_lengths), + android_lens_info_available_focal_lengths); + + /* android.request */ + int32_t android_request_max_num_output_streams[] = {0, 3, 1}; + m.addInt32(ANDROID_REQUEST_MAX_NUM_OUTPUT_STREAMS, + ARRAY_SIZE(android_request_max_num_output_streams), + android_request_max_num_output_streams); + + /* android.scaler */ + int32_t android_scaler_available_formats[] = { + HAL_PIXEL_FORMAT_RAW_SENSOR, + HAL_PIXEL_FORMAT_BLOB, + HAL_PIXEL_FORMAT_RGBA_8888, + 
HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, + // These are handled by YCbCr_420_888 + // HAL_PIXEL_FORMAT_YV12, + // HAL_PIXEL_FORMAT_YCrCb_420_SP, + HAL_PIXEL_FORMAT_YCbCr_420_888}; + m.addInt32(ANDROID_SCALER_AVAILABLE_FORMATS, + ARRAY_SIZE(android_scaler_available_formats), + android_scaler_available_formats); + + int64_t android_scaler_available_jpeg_min_durations[] = {1}; + m.addInt64(ANDROID_SCALER_AVAILABLE_JPEG_MIN_DURATIONS, + ARRAY_SIZE(android_scaler_available_jpeg_min_durations), + android_scaler_available_jpeg_min_durations); + + int32_t android_scaler_available_jpeg_sizes[] = {640, 480}; + m.addInt32(ANDROID_SCALER_AVAILABLE_JPEG_SIZES, + ARRAY_SIZE(android_scaler_available_jpeg_sizes), + android_scaler_available_jpeg_sizes); + + float android_scaler_available_max_digital_zoom[] = {1}; + m.addFloat(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM, + ARRAY_SIZE(android_scaler_available_max_digital_zoom), + android_scaler_available_max_digital_zoom); + + int64_t android_scaler_available_processed_min_durations[] = {1}; + m.addInt64(ANDROID_SCALER_AVAILABLE_PROCESSED_MIN_DURATIONS, + ARRAY_SIZE(android_scaler_available_processed_min_durations), + android_scaler_available_processed_min_durations); + + int32_t android_scaler_available_processed_sizes[] = {640, 480}; + m.addInt32(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES, + ARRAY_SIZE(android_scaler_available_processed_sizes), + android_scaler_available_processed_sizes); + + int64_t android_scaler_available_raw_min_durations[] = {1}; + m.addInt64(ANDROID_SCALER_AVAILABLE_RAW_MIN_DURATIONS, + ARRAY_SIZE(android_scaler_available_raw_min_durations), + android_scaler_available_raw_min_durations); + + int32_t android_scaler_available_raw_sizes[] = {640, 480}; + m.addInt32(ANDROID_SCALER_AVAILABLE_RAW_SIZES, + ARRAY_SIZE(android_scaler_available_raw_sizes), + android_scaler_available_raw_sizes); + + /* android.sensor */ + + int32_t android_sensor_info_active_array_size[] = {0, 0, 640, 480}; + 
m.addInt32(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE, + ARRAY_SIZE(android_sensor_info_active_array_size), + android_sensor_info_active_array_size); + + int32_t android_sensor_info_sensitivity_range[] = + {100, 1600}; + m.addInt32(ANDROID_SENSOR_INFO_SENSITIVITY_RANGE, + ARRAY_SIZE(android_sensor_info_sensitivity_range), + android_sensor_info_sensitivity_range); + + int64_t android_sensor_info_max_frame_duration[] = {30000000000}; + m.addInt64(ANDROID_SENSOR_INFO_MAX_FRAME_DURATION, + ARRAY_SIZE(android_sensor_info_max_frame_duration), + android_sensor_info_max_frame_duration); + + float android_sensor_info_physical_size[] = {3.2, 2.4}; + m.addFloat(ANDROID_SENSOR_INFO_PHYSICAL_SIZE, + ARRAY_SIZE(android_sensor_info_physical_size), + android_sensor_info_physical_size); + + int32_t android_sensor_info_pixel_array_size[] = {640, 480}; + m.addInt32(ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE, + ARRAY_SIZE(android_sensor_info_pixel_array_size), + android_sensor_info_pixel_array_size); + + int32_t android_sensor_orientation[] = {0}; + m.addInt32(ANDROID_SENSOR_ORIENTATION, + ARRAY_SIZE(android_sensor_orientation), + android_sensor_orientation); + + /* End of static camera characteristics */ + + return clone_camera_metadata(m.get()); +} + +int ExampleCamera::initDevice() +{ + int res; + Metadata base; + + // Create standard settings templates from copies of base metadata + // TODO: use vendor tags in base metadata + res = base.add1UInt8(ANDROID_CONTROL_MODE, ANDROID_CONTROL_MODE_OFF); + if (res) + return res; + + // Use base settings to create all other templates and set them + res = setPreviewTemplate(base); + if (res) + return res; + res = setStillTemplate(base); + if (res) + return res; + res = setRecordTemplate(base); + if (res) + return res; + res = setSnapshotTemplate(base); + if (res) + return res; + res = setZslTemplate(base); + if (res) + return res; + + return 0; +} + +int ExampleCamera::setPreviewTemplate(Metadata m) +{ + // Setup default preview controls + int res = 
m.add1UInt8(ANDROID_CONTROL_CAPTURE_INTENT, + ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW); + + if (res) + return res; + // TODO: set fast auto-focus, auto-whitebalance, auto-exposure, auto flash + return setTemplate(CAMERA3_TEMPLATE_PREVIEW, m.get()); +} + +int ExampleCamera::setStillTemplate(Metadata m) +{ + int res = m.add1UInt8(ANDROID_CONTROL_CAPTURE_INTENT, + ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE); + // Setup default still capture controls + if (res) + return res; + // TODO: set fast auto-focus, auto-whitebalance, auto-exposure, auto flash + return setTemplate(CAMERA3_TEMPLATE_STILL_CAPTURE, m.get()); +} + +int ExampleCamera::setRecordTemplate(Metadata m) +{ + int res = m.add1UInt8(ANDROID_CONTROL_CAPTURE_INTENT, + ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_RECORD); + // Setup default video record controls + if (res) + return res; + // TODO: set slow auto-focus, auto-whitebalance, auto-exposure, flash off + return setTemplate(CAMERA3_TEMPLATE_VIDEO_RECORD, m.get()); +} + +int ExampleCamera::setSnapshotTemplate(Metadata m) +{ + int res = m.add1UInt8(ANDROID_CONTROL_CAPTURE_INTENT, + ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_SNAPSHOT); + // Setup default video snapshot controls + if (res) + return res; + // TODO: set slow auto-focus, auto-whitebalance, auto-exposure, flash off + return setTemplate(CAMERA3_TEMPLATE_VIDEO_SNAPSHOT, m.get()); +} + +int ExampleCamera::setZslTemplate(Metadata m) +{ + int res = m.add1UInt8(ANDROID_CONTROL_CAPTURE_INTENT, + ANDROID_CONTROL_CAPTURE_INTENT_ZERO_SHUTTER_LAG); + // Setup default zero shutter lag controls + if (res) + return res; + // TODO: set reprocessing parameters for zsl input queue + return setTemplate(CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG, m.get()); +} + +bool ExampleCamera::isValidCaptureSettings(const camera_metadata_t* settings) +{ + // TODO: reject settings that cannot be captured + return true; +} + +} // namespace default_camera_hal diff --git a/modules/camera/ExampleCamera.h b/modules/camera/ExampleCamera.h new file mode 
100644 index 0000000..45c4a94 --- /dev/null +++ b/modules/camera/ExampleCamera.h @@ -0,0 +1,49 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef EXAMPLE_CAMERA_H_ +#define EXAMPLE_CAMERA_H_ + +#include <system/camera_metadata.h> +#include "Camera.h" + +namespace default_camera_hal { +// ExampleCamera is an example for a specific camera device. The Camera object +// contains all logic common between all cameras (e.g. front and back cameras), +// while a specific camera device (e.g. ExampleCamera) holds all specific +// metadata and logic about that device. 
+class ExampleCamera : public Camera { + public: + ExampleCamera(int id); + ~ExampleCamera(); + + private: + // Initialize static camera characteristics for individual device + camera_metadata_t *initStaticInfo(); + // Initialize whole device (templates/etc) when opened + int initDevice(); + // Initialize each template metadata controls + int setPreviewTemplate(Metadata m); + int setStillTemplate(Metadata m); + int setRecordTemplate(Metadata m); + int setSnapshotTemplate(Metadata m); + int setZslTemplate(Metadata m); + // Verify settings are valid for a capture with this device + bool isValidCaptureSettings(const camera_metadata_t* settings); +}; +} // namespace default_camera_hal + +#endif // CAMERA_H_ diff --git a/modules/camera/Metadata.cpp b/modules/camera/Metadata.cpp index d5854f9..f195534 100644 --- a/modules/camera/Metadata.cpp +++ b/modules/camera/Metadata.cpp @@ -14,7 +14,6 @@ * limitations under the License. */ -#include <pthread.h> #include <system/camera_metadata.h> //#define LOG_NDEBUG 0 @@ -22,102 +21,85 @@ #include <cutils/log.h> #define ATRACE_TAG (ATRACE_TAG_CAMERA | ATRACE_TAG_HAL) -#include <cutils/trace.h> -#include "ScopedTrace.h" +#include <utils/Trace.h> #include "Metadata.h" namespace default_camera_hal { -Metadata::Metadata() - : mHead(NULL), - mTail(NULL), - mEntryCount(0), - mDataCount(0), - mGenerated(NULL), - mDirty(true) +Metadata::Metadata(): + mData(NULL) { - // NULL (default) pthread mutex attributes - pthread_mutex_init(&mMutex, NULL); } Metadata::~Metadata() { - Entry *current = mHead; + replace(NULL); +} - while (current != NULL) { - Entry *tmp = current; - current = current->mNext; - delete tmp; +void Metadata::replace(camera_metadata_t *m) +{ + if (m == mData) { + ALOGE("%s: Replacing metadata with itself?!", __func__); + return; } - - if (mGenerated != NULL) - free_camera_metadata(mGenerated); - - pthread_mutex_destroy(&mMutex); + if (mData) + free_camera_metadata(mData); + mData = m; } -Metadata::Metadata(uint8_t mode, 
uint8_t intent) - : mHead(NULL), - mTail(NULL), - mEntryCount(0), - mDataCount(0), - mGenerated(NULL), - mDirty(true) +int Metadata::init(const camera_metadata_t *metadata) { - pthread_mutex_init(&mMutex, NULL); - - if (validate(ANDROID_CONTROL_MODE, TYPE_BYTE, 1)) { - int res = add(ANDROID_CONTROL_MODE, 1, &mode); - if (res != 0) { - ALOGE("%s: Unable to add mode to template!", __func__); - } - } else { - ALOGE("%s: Invalid mode constructing template!", __func__); - } + camera_metadata_t* tmp; - if (validate(ANDROID_CONTROL_CAPTURE_INTENT, TYPE_BYTE, 1)) { - int res = add(ANDROID_CONTROL_CAPTURE_INTENT, 1, &intent); - if (res != 0) { - ALOGE("%s: Unable to add capture intent to template!", __func__); - } - } else { - ALOGE("%s: Invalid capture intent constructing template!", __func__); - } + if (!validate_camera_metadata_structure(metadata, NULL)) + return -EINVAL; + + tmp = clone_camera_metadata(metadata); + if (tmp == NULL) + return -EINVAL; + + replace(tmp); + return 0; } -int Metadata::addUInt8(uint32_t tag, int count, uint8_t *data) +int Metadata::addUInt8(uint32_t tag, int count, const uint8_t *data) { if (!validate(tag, TYPE_BYTE, count)) return -EINVAL; return add(tag, count, data); } -int Metadata::addInt32(uint32_t tag, int count, int32_t *data) +int Metadata::add1UInt8(uint32_t tag, const uint8_t data) +{ + return addUInt8(tag, 1, &data); +} + +int Metadata::addInt32(uint32_t tag, int count, const int32_t *data) { if (!validate(tag, TYPE_INT32, count)) return -EINVAL; return add(tag, count, data); } -int Metadata::addFloat(uint32_t tag, int count, float *data) +int Metadata::addFloat(uint32_t tag, int count, const float *data) { if (!validate(tag, TYPE_FLOAT, count)) return -EINVAL; return add(tag, count, data); } -int Metadata::addInt64(uint32_t tag, int count, int64_t *data) +int Metadata::addInt64(uint32_t tag, int count, const int64_t *data) { if (!validate(tag, TYPE_INT64, count)) return -EINVAL; return add(tag, count, data); } -int 
Metadata::addDouble(uint32_t tag, int count, double *data) +int Metadata::addDouble(uint32_t tag, int count, const double *data) { if (!validate(tag, TYPE_DOUBLE, count)) return -EINVAL; return add(tag, count, data); } int Metadata::addRational(uint32_t tag, int count, - camera_metadata_rational_t *data) + const camera_metadata_rational_t *data) { if (!validate(tag, TYPE_RATIONAL, count)) return -EINVAL; return add(tag, count, data); @@ -145,102 +127,48 @@ bool Metadata::validate(uint32_t tag, int tag_type, int count) return true; } -int Metadata::add(uint32_t tag, int count, void *tag_data) +int Metadata::add(uint32_t tag, int count, const void *tag_data) { + int res; + camera_metadata_t* tmp; int tag_type = get_camera_metadata_tag_type(tag); - size_t type_sz = camera_metadata_type_size[tag_type]; - - // Allocate array to hold new metadata - void *data = malloc(count * type_sz); - if (data == NULL) + size_t size = calculate_camera_metadata_entry_data_size(tag_type, count); + size_t entry_capacity = get_camera_metadata_entry_count(mData) + 1; + size_t data_capacity = get_camera_metadata_data_count(mData) + size; + + // Opportunistically attempt to add if metadata has room for it + if (!add_camera_metadata_entry(mData, tag, tag_data, count)) + return 0; + + // Double new dimensions to minimize future reallocations + tmp = allocate_camera_metadata(entry_capacity * 2, data_capacity * 2); + if (tmp == NULL) { + ALOGE("%s: Failed to allocate new metadata with %zu entries, %zu data", + __func__, entry_capacity, data_capacity); return -ENOMEM; - memcpy(data, tag_data, count * type_sz); - - pthread_mutex_lock(&mMutex); - mEntryCount++; - mDataCount += calculate_camera_metadata_entry_data_size(tag_type, count); - push(new Entry(tag, data, count)); - mDirty = true; - pthread_mutex_unlock(&mMutex); - return 0; -} - -camera_metadata_t* Metadata::generate() -{ - pthread_mutex_lock(&mMutex); - // Reuse if old generated metadata still valid - if (!mDirty && mGenerated != NULL) { 
- ALOGV("%s: Reusing generated metadata at %p", __func__, mGenerated); - goto out; } - // Destroy old metadata - if (mGenerated != NULL) { - ALOGV("%s: Freeing generated metadata at %p", __func__, mGenerated); - free_camera_metadata(mGenerated); - mGenerated = NULL; + // Append the current metadata to the new (empty) metadata + res = append_camera_metadata(tmp, mData); + if (res) { + ALOGE("%s: Failed to append old metadata %p to new %p", + __func__, mData, tmp); + return res; } - // Generate new metadata structure - ALOGV("%s: Generating new camera metadata structure, Entries:%d Data:%d", - __func__, mEntryCount, mDataCount); - mGenerated = allocate_camera_metadata(mEntryCount, mDataCount); - if (mGenerated == NULL) { - ALOGE("%s: Failed to allocate metadata (%d entries %d data)", - __func__, mEntryCount, mDataCount); - goto out; + // Add the remaining new item + res = add_camera_metadata_entry(tmp, tag, tag_data, count); + if (res) { + ALOGE("%s: Failed to add new entry (%d, %p, %d) to metadata %p", + __func__, tag, tag_data, count, tmp); + return res; } - // Walk list of entries adding each one to newly allocated metadata - for (Entry *current = mHead; current != NULL; current = current->mNext) { - int res = add_camera_metadata_entry(mGenerated, current->mTag, - current->mData, current->mCount); - if (res != 0) { - ALOGE("%s: Failed to add camera metadata: %d", __func__, res); - free_camera_metadata(mGenerated); - mGenerated = NULL; - goto out; - } - } - -out: - pthread_mutex_unlock(&mMutex); - return mGenerated; -} - -Metadata::Entry::Entry(uint32_t tag, void *data, int count) - : mNext(NULL), - mPrev(NULL), - mTag(tag), - mData(data), - mCount(count) -{ -} -void Metadata::push(Entry *e) -{ - if (mHead == NULL) { - mHead = mTail = e; - } else { - mTail->insertAfter(e); - mTail = e; - } -} - -Metadata::Entry::~Entry() -{ - if (mNext != NULL) - mNext->mPrev = mPrev; - if (mPrev != NULL) - mPrev->mNext = mNext; + replace(tmp); + return 0; } -void 
Metadata::Entry::insertAfter(Entry *e) +camera_metadata_t* Metadata::get() { - if (e == NULL) - return; - if (mNext != NULL) - mNext->mPrev = e; - e->mNext = mNext; - e->mPrev = this; - mNext = e; + return mData; } } // namespace default_camera_hal diff --git a/modules/camera/Metadata.h b/modules/camera/Metadata.h index 22d2f22..f432d04 100644 --- a/modules/camera/Metadata.h +++ b/modules/camera/Metadata.h @@ -17,10 +17,9 @@ #ifndef METADATA_H_ #define METADATA_H_ +#include <stdint.h> #include <hardware/camera3.h> -#include <hardware/gralloc.h> #include <system/camera_metadata.h> -#include <system/graphics.h> namespace default_camera_hal { // Metadata is a convenience class for dealing with libcamera_metadata @@ -28,51 +27,32 @@ class Metadata { public: Metadata(); ~Metadata(); - // Constructor used for request metadata templates - Metadata(uint8_t mode, uint8_t intent); + // Initialize with framework metadata + int init(const camera_metadata_t *metadata); - // Parse and add an entry - int addUInt8(uint32_t tag, int count, uint8_t *data); - int addInt32(uint32_t tag, int count, int32_t *data); - int addFloat(uint32_t tag, int count, float *data); - int addInt64(uint32_t tag, int count, int64_t *data); - int addDouble(uint32_t tag, int count, double *data); + // Parse and add an entry. Allocates and copies new storage for *data. 
+ int addUInt8(uint32_t tag, int count, const uint8_t *data); + int add1UInt8(uint32_t tag, const uint8_t data); + int addInt32(uint32_t tag, int count, const int32_t *data); + int addFloat(uint32_t tag, int count, const float *data); + int addInt64(uint32_t tag, int count, const int64_t *data); + int addDouble(uint32_t tag, int count, const double *data); int addRational(uint32_t tag, int count, - camera_metadata_rational_t *data); - // Generate a camera_metadata structure and fill it with internal data - camera_metadata_t *generate(); + const camera_metadata_rational_t *data); + + // Get a handle to the current metadata + // This is not a durable handle, and may be destroyed by add*/init + camera_metadata_t* get(); private: + // Actual internal storage + camera_metadata_t* mData; + // Destroy old metadata and replace with new + void replace(camera_metadata_t *m); // Validate the tag, type and count for a metadata entry bool validate(uint32_t tag, int tag_type, int count); - // Add a verified tag with data to this Metadata structure - int add(uint32_t tag, int count, void *tag_data); - - class Entry { - public: - Entry(uint32_t tag, void *data, int count); - ~Entry(); - Entry *mNext; - Entry *mPrev; - const uint32_t mTag; - const void *mData; - const int mCount; - void insertAfter(Entry *e); - }; - // List ends - Entry *mHead; - Entry *mTail; - // Append entry to list - void push(Entry *e); - // Total of entries and entry data size - int mEntryCount; - int mDataCount; - // Save generated metadata, invalidated on update - camera_metadata_t *mGenerated; - // Flag to force metadata regeneration - bool mDirty; - // Lock protecting the Metadata object for modifications - pthread_mutex_t mMutex; + // Add a verified tag with data + int add(uint32_t tag, int count, const void *tag_data); }; } // namespace default_camera_hal diff --git a/modules/camera/ScopedTrace.h b/modules/camera/ScopedTrace.h deleted file mode 100644 index ed00570..0000000 --- 
a/modules/camera/ScopedTrace.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (C) 2013 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef CAMERA_SCOPED_TRACE_H -#define CAMERA_SCOPED_TRACE_H - -#include <stdint.h> -#include <cutils/trace.h> - -// See <cutils/trace.h> for more tracing macros. - -// CAMTRACE_NAME traces the beginning and end of the current scope. To trace -// the correct start and end times this macro should be declared first in the -// scope body. -#define CAMTRACE_NAME(name) ScopedTrace ___tracer(ATRACE_TAG, name) -// CAMTRACE_CALL is an ATRACE_NAME that uses the current function name. -#define CAMTRACE_CALL() CAMTRACE_NAME(__FUNCTION__) - -namespace default_camera_hal { - -class ScopedTrace { -public: -inline ScopedTrace(uint64_t tag, const char* name) - : mTag(tag) { - atrace_begin(mTag,name); -} - -inline ~ScopedTrace() { - atrace_end(mTag); -} - -private: - uint64_t mTag; -}; - -}; // namespace default_camera_hal - -#endif // CAMERA_SCOPED_TRACE_H diff --git a/modules/camera/Stream.cpp b/modules/camera/Stream.cpp index aae7adb..2db3ed2 100644 --- a/modules/camera/Stream.cpp +++ b/modules/camera/Stream.cpp @@ -14,18 +14,18 @@ * limitations under the License. 
*/ -#include <pthread.h> +#include <stdio.h> #include <hardware/camera3.h> #include <hardware/gralloc.h> #include <system/graphics.h> +#include <utils/Mutex.h> //#define LOG_NDEBUG 0 #define LOG_TAG "Stream" #include <cutils/log.h> #define ATRACE_TAG (ATRACE_TAG_CAMERA | ATRACE_TAG_HAL) -#include <cutils/trace.h> -#include "ScopedTrace.h" +#include <utils/Trace.h> #include "Stream.h" @@ -45,37 +45,32 @@ Stream::Stream(int id, camera3_stream_t *s) mBuffers(0), mNumBuffers(0) { - // NULL (default) pthread mutex attributes - pthread_mutex_init(&mMutex, NULL); } Stream::~Stream() { - pthread_mutex_lock(&mMutex); + android::Mutex::Autolock al(mLock); unregisterBuffers_L(); - pthread_mutex_unlock(&mMutex); } void Stream::setUsage(uint32_t usage) { - pthread_mutex_lock(&mMutex); + android::Mutex::Autolock al(mLock); if (usage != mUsage) { mUsage = usage; mStream->usage = usage; unregisterBuffers_L(); } - pthread_mutex_unlock(&mMutex); } void Stream::setMaxBuffers(uint32_t max_buffers) { - pthread_mutex_lock(&mMutex); + android::Mutex::Autolock al(mLock); if (max_buffers != mMaxBuffers) { mMaxBuffers = max_buffers; mStream->max_buffers = max_buffers; unregisterBuffers_L(); } - pthread_mutex_unlock(&mMutex); } int Stream::getType() @@ -95,6 +90,61 @@ bool Stream::isOutputType() mType == CAMERA3_STREAM_BIDIRECTIONAL; } +const char* Stream::typeToString(int type) +{ + switch (type) { + case CAMERA3_STREAM_INPUT: + return "CAMERA3_STREAM_INPUT"; + case CAMERA3_STREAM_OUTPUT: + return "CAMERA3_STREAM_OUTPUT"; + case CAMERA3_STREAM_BIDIRECTIONAL: + return "CAMERA3_STREAM_BIDIRECTIONAL"; + } + return "Invalid stream type!"; +} + +const char* Stream::formatToString(int format) +{ + // See <system/graphics.h> for full list + switch (format) { + case HAL_PIXEL_FORMAT_BGRA_8888: + return "BGRA 8888"; + case HAL_PIXEL_FORMAT_RGBA_8888: + return "RGBA 8888"; + case HAL_PIXEL_FORMAT_RGBX_8888: + return "RGBX 8888"; + case HAL_PIXEL_FORMAT_RGB_888: + return "RGB 888"; + case 
HAL_PIXEL_FORMAT_RGB_565: + return "RGB 565"; + case HAL_PIXEL_FORMAT_sRGB_A_8888: + return "sRGB A 8888"; + case HAL_PIXEL_FORMAT_sRGB_X_8888: + return "sRGB B 8888"; + case HAL_PIXEL_FORMAT_Y8: + return "Y8"; + case HAL_PIXEL_FORMAT_Y16: + return "Y16"; + case HAL_PIXEL_FORMAT_YV12: + return "YV12"; + case HAL_PIXEL_FORMAT_YCbCr_422_SP: + return "NV16"; + case HAL_PIXEL_FORMAT_YCrCb_420_SP: + return "NV21"; + case HAL_PIXEL_FORMAT_YCbCr_422_I: + return "YUY2"; + case HAL_PIXEL_FORMAT_RAW_SENSOR: + return "RAW SENSOR"; + case HAL_PIXEL_FORMAT_BLOB: + return "BLOB"; + case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED: + return "IMPLEMENTATION DEFINED"; + case HAL_PIXEL_FORMAT_YCbCr_420_888: + return "FLEXIBLE YCbCr 420 888"; + } + return "Invalid stream format!"; +} + bool Stream::isRegistered() { return mRegistered; @@ -113,15 +163,15 @@ bool Stream::isValidReuseStream(int id, camera3_stream_t *s) return false; } if (s->stream_type != mType) { - // TODO: prettyprint type string - ALOGE("%s:%d: Mismatched type in reused stream. Got %d expect %d", - __func__, mId, s->stream_type, mType); + ALOGE("%s:%d: Mismatched type in reused stream. Got %s(%d) " + "expect %s(%d)", __func__, mId, typeToString(s->stream_type), + s->stream_type, typeToString(mType), mType); return false; } if (s->format != mFormat) { - // TODO: prettyprint format string - ALOGE("%s:%d: Mismatched format in reused stream. Got %d expect %d", - __func__, mId, s->format, mFormat); + ALOGE("%s:%d: Mismatched format in reused stream. Got %s(%d) " + "expect %s(%d)", __func__, mId, formatToString(s->format), + s->format, formatToString(mFormat), mFormat); return false; } if (s->width != mWidth) { @@ -139,7 +189,8 @@ bool Stream::isValidReuseStream(int id, camera3_stream_t *s) int Stream::registerBuffers(const camera3_stream_buffer_set_t *buf_set) { - CAMTRACE_CALL(); + ATRACE_CALL(); + android::Mutex::Autolock al(mLock); if (buf_set->stream != mStream) { ALOGE("%s:%d: Buffer set for invalid stream. 
Got %p expect %p", @@ -147,8 +198,6 @@ int Stream::registerBuffers(const camera3_stream_buffer_set_t *buf_set) return -EINVAL; } - pthread_mutex_lock(&mMutex); - mNumBuffers = buf_set->num_buffers; mBuffers = new buffer_handle_t*[mNumBuffers]; @@ -160,12 +209,10 @@ int Stream::registerBuffers(const camera3_stream_buffer_set_t *buf_set) } mRegistered = true; - pthread_mutex_unlock(&mMutex); - return 0; } -// This must only be called with mMutex held +// This must only be called with mLock held void Stream::unregisterBuffers_L() { mRegistered = false; @@ -174,4 +221,23 @@ void Stream::unregisterBuffers_L() // TODO: unregister buffers from hw } +void Stream::dump(int fd) +{ + android::Mutex::Autolock al(mLock); + + dprintf(fd, "Stream ID: %d (%p)\n", mId, mStream); + dprintf(fd, "Stream Type: %s (%d)\n", typeToString(mType), mType); + dprintf(fd, "Width: %"PRIu32" Height: %"PRIu32"\n", mWidth, mHeight); + dprintf(fd, "Stream Format: %s (%d)", formatToString(mFormat), mFormat); + // ToDo: prettyprint usage mask flags + dprintf(fd, "Gralloc Usage Mask: %#"PRIx32"\n", mUsage); + dprintf(fd, "Max Buffer Count: %"PRIu32"\n", mMaxBuffers); + dprintf(fd, "Buffers Registered: %s\n", mRegistered ? "true" : "false"); + dprintf(fd, "Number of Buffers: %"PRIu32"\n", mNumBuffers); + for (uint32_t i = 0; i < mNumBuffers; i++) { + dprintf(fd, "Buffer %"PRIu32"/%"PRIu32": %p\n", i, mNumBuffers, + mBuffers[i]); + } +} + } // namespace default_camera_hal diff --git a/modules/camera/Stream.h b/modules/camera/Stream.h index 34abd95..5efbc52 100644 --- a/modules/camera/Stream.h +++ b/modules/camera/Stream.h @@ -20,6 +20,7 @@ #include <hardware/camera3.h> #include <hardware/gralloc.h> #include <system/graphics.h> +#include <utils/Mutex.h> namespace default_camera_hal { // Stream represents a single input or output stream for a camera device. 
@@ -41,12 +42,15 @@ class Stream { bool isInputType(); bool isOutputType(); bool isRegistered(); + const char* typeToString(int type); + const char* formatToString(int format); + void dump(int fd); // This stream is being reused. Used in stream configuration passes bool mReuse; private: - // Clean up buffer state. must be called with mMutex held. + // Clean up buffer state. must be called with mLock held. void unregisterBuffers_L(); // The camera device id this stream belongs to @@ -72,7 +76,7 @@ class Stream { // Number of buffers in mBuffers unsigned int mNumBuffers; // Lock protecting the Stream object for modifications - pthread_mutex_t mMutex; + android::Mutex mLock; }; } // namespace default_camera_hal diff --git a/modules/camera/VendorTags.cpp b/modules/camera/VendorTags.cpp new file mode 100644 index 0000000..2c54648 --- /dev/null +++ b/modules/camera/VendorTags.cpp @@ -0,0 +1,188 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <system/camera_metadata.h> +#include "Metadata.h" + +//#define LOG_NDEBUG 0 +#define LOG_TAG "VendorTags" +#include <cutils/log.h> + +#define ATRACE_TAG (ATRACE_TAG_CAMERA | ATRACE_TAG_HAL) +#include <utils/Trace.h> + +#include "VendorTags.h" + +namespace default_camera_hal { + +// Internal representations of vendor tags for convenience. +// Other classes must access this data via public interfaces. 
+// Structured to be easy to extend and contain complexity. +namespace { +// Describes a single vendor tag entry +struct Entry { + const char* name; + uint8_t type; +}; +// Describes a vendor tag section +struct Section { + const char* name; + uint32_t start; + uint32_t end; + const Entry* tags; +}; + +// Entry arrays for each section +const Entry DemoWizardry[demo_wizardry_end - demo_wizardry_start] = { + [demo_wizardry_dimension_size - demo_wizardry_start] = + {"dimensionSize", TYPE_INT32}, + [demo_wizardry_dimensions - demo_wizardry_start] = + {"dimensions", TYPE_INT32}, + [demo_wizardry_familiar - demo_wizardry_start] = + {"familiar", TYPE_BYTE}, + [demo_wizardry_fire - demo_wizardry_start] = + {"fire", TYPE_RATIONAL} +}; + +const Entry DemoSorcery[demo_sorcery_end - demo_sorcery_start] = { + [demo_sorcery_difficulty - demo_sorcery_start] = + {"difficulty", TYPE_INT64}, + [demo_sorcery_light - demo_sorcery_start] = + {"light", TYPE_BYTE} +}; + +const Entry DemoMagic[demo_magic_end - demo_magic_start] = { + [demo_magic_card_trick - demo_magic_start] = + {"cardTrick", TYPE_DOUBLE}, + [demo_magic_levitation - demo_magic_start] = + {"levitation", TYPE_FLOAT} +}; + +// Array of all sections +const Section DemoSections[DEMO_SECTION_COUNT] = { + [DEMO_WIZARDRY] = { "demo.wizardry", + demo_wizardry_start, + demo_wizardry_end, + DemoWizardry }, + [DEMO_SORCERY] = { "demo.sorcery", + demo_sorcery_start, + demo_sorcery_end, + DemoSorcery }, + [DEMO_MAGIC] = { "demo.magic", + demo_magic_start, + demo_magic_end, + DemoMagic } +}; + +// Get a static handle to a specific vendor tag section +const Section* getSection(uint32_t tag) +{ + uint32_t section = (tag - vendor_section_start) >> 16; + + if (tag < vendor_section_start) { + ALOGE("%s: Tag 0x%x before vendor section", __func__, tag); + return NULL; + } + + if (section >= DEMO_SECTION_COUNT) { + ALOGE("%s: Tag 0x%x after vendor section", __func__, tag); + return NULL; + } + + return &DemoSections[section]; +} + +// Get a 
static handle to a specific vendor tag entry +const Entry* getEntry(uint32_t tag) +{ + const Section* section = getSection(tag); + int index; + + if (section == NULL) + return NULL; + + if (tag >= section->end) { + ALOGE("%s: Tag 0x%x outside section", __func__, tag); + return NULL; + } + + index = tag - section->start; + return §ion->tags[index]; +} +} // namespace + +VendorTags::VendorTags() + : mTagCount(0) +{ + for (int i = 0; i < DEMO_SECTION_COUNT; i++) { + mTagCount += DemoSections[i].end - DemoSections[i].start; + } +} + +VendorTags::~VendorTags() +{ +} + +int VendorTags::getTagCount(const vendor_tag_ops_t* ops) +{ + return mTagCount; +} + +void VendorTags::getAllTags(const vendor_tag_ops_t* ops, uint32_t* tag_array) +{ + if (tag_array == NULL) { + ALOGE("%s: NULL tag_array", __func__); + return; + } + + for (int i = 0; i < DEMO_SECTION_COUNT; i++) { + for (uint32_t tag = DemoSections[i].start; + tag < DemoSections[i].end; tag++) { + *tag_array++ = tag; + } + } +} + +const char* VendorTags::getSectionName(const vendor_tag_ops_t* ops, uint32_t tag) +{ + const Section* section = getSection(tag); + + if (section == NULL) + return NULL; + + return section->name; +} + +const char* VendorTags::getTagName(const vendor_tag_ops_t* ops, uint32_t tag) +{ + const Entry* entry = getEntry(tag); + + if (entry == NULL) + return NULL; + + return entry->name; +} + +int VendorTags::getTagType(const vendor_tag_ops_t* ops, uint32_t tag) +{ + const Entry* entry = getEntry(tag); + + if (entry == NULL) + return -1; + + return entry->type; +} +} // namespace default_camera_hal diff --git a/modules/camera/VendorTags.h b/modules/camera/VendorTags.h new file mode 100644 index 0000000..ecf777e --- /dev/null +++ b/modules/camera/VendorTags.h @@ -0,0 +1,76 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef VENDOR_TAGS_H_ +#define VENDOR_TAGS_H_ + +#include <hardware/camera_common.h> +#include <system/camera_metadata.h> + +namespace default_camera_hal { + +// VendorTags contains all vendor-specific metadata tag functionality +class VendorTags { + public: + VendorTags(); + ~VendorTags(); + + // Vendor Tags Operations (see <hardware/camera_common.h>) + int getTagCount(const vendor_tag_ops_t* ops); + void getAllTags(const vendor_tag_ops_t* ops, uint32_t* tag_array); + const char* getSectionName(const vendor_tag_ops_t* ops, uint32_t tag); + const char* getTagName(const vendor_tag_ops_t* ops, uint32_t tag); + int getTagType(const vendor_tag_ops_t* ops, uint32_t tag); + + private: + // Total number of vendor tags + int mTagCount; +}; + +// Tag sections start at the beginning of vendor tags (0x8000_0000) +// See <system/camera_metadata.h> +enum { + DEMO_WIZARDRY, + DEMO_SORCERY, + DEMO_MAGIC, + DEMO_SECTION_COUNT +}; + +const uint32_t vendor_section_start = VENDOR_SECTION_START; + +// Each section starts at increments of 0x1_0000 +const uint32_t demo_wizardry_start = (DEMO_WIZARDRY + VENDOR_SECTION) << 16; +const uint32_t demo_sorcery_start = (DEMO_SORCERY + VENDOR_SECTION) << 16; +const uint32_t demo_magic_start = (DEMO_MAGIC + VENDOR_SECTION) << 16; + +// Vendor Tag values, start value begins each section +const uint32_t demo_wizardry_dimension_size = demo_wizardry_start; +const uint32_t demo_wizardry_dimensions = demo_wizardry_start + 1; +const uint32_t demo_wizardry_familiar = demo_wizardry_start + 2; +const uint32_t demo_wizardry_fire = 
demo_wizardry_start + 3; +const uint32_t demo_wizardry_end = demo_wizardry_start + 4; + +const uint32_t demo_sorcery_difficulty = demo_sorcery_start; +const uint32_t demo_sorcery_light = demo_sorcery_start + 1; +const uint32_t demo_sorcery_end = demo_sorcery_start + 2; + +const uint32_t demo_magic_card_trick = demo_magic_start; +const uint32_t demo_magic_levitation = demo_magic_start + 1; +const uint32_t demo_magic_end = demo_magic_start + 2; + +} // namespace default_camera_hal + +#endif // VENDOR_TAGS_H_ diff --git a/modules/fingerprint/Android.mk b/modules/fingerprint/Android.mk new file mode 100644 index 0000000..58c0a83 --- /dev/null +++ b/modules/fingerprint/Android.mk @@ -0,0 +1,25 @@ +# Copyright (C) 2013 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +LOCAL_PATH := $(call my-dir) + +include $(CLEAR_VARS) + +LOCAL_MODULE := fingerprint.default +LOCAL_MODULE_RELATIVE_PATH := hw +LOCAL_SRC_FILES := fingerprint.c +LOCAL_SHARED_LIBRARIES := liblog +LOCAL_MODULE_TAGS := optional + +include $(BUILD_SHARED_LIBRARY) diff --git a/modules/fingerprint/fingerprint.c b/modules/fingerprint/fingerprint.c new file mode 100644 index 0000000..14dac12 --- /dev/null +++ b/modules/fingerprint/fingerprint.c @@ -0,0 +1,90 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#define LOG_TAG "FingerprintHal" + +#include <errno.h> +#include <string.h> +#include <cutils/log.h> +#include <hardware/hardware.h> +#include <hardware/fingerprint.h> + +static int fingerprint_close(hw_device_t *dev) +{ + if (dev) { + free(dev); + return 0; + } else { + return -1; + } +} + +static int fingerprint_enroll(struct fingerprint_device __unused *dev, + uint32_t __unused timeout_sec) { + return FINGERPRINT_ERROR; +} + +static int fingerprint_remove(struct fingerprint_device __unused *dev, + uint32_t __unused fingerprint_id) { + return FINGERPRINT_ERROR; +} + +static int set_notify_callback(struct fingerprint_device *dev, + fingerprint_notify_t notify) { + /* Decorate with locks */ + dev->notify = notify; + return FINGERPRINT_ERROR; +} + +static int fingerprint_open(const hw_module_t* module, const char __unused *id, + hw_device_t** device) +{ + if (device == NULL) { + ALOGE("NULL device on open"); + return -EINVAL; + } + + fingerprint_device_t *dev = malloc(sizeof(fingerprint_device_t)); + memset(dev, 0, sizeof(fingerprint_device_t)); + + dev->common.tag = HARDWARE_DEVICE_TAG; + dev->common.version = HARDWARE_MODULE_API_VERSION(1, 0); + dev->common.module = (struct hw_module_t*) module; + dev->common.close = fingerprint_close; + + dev->enroll = fingerprint_enroll; + dev->remove = fingerprint_remove; + dev->set_notify = set_notify_callback; + dev->notify = NULL; + + *device = (hw_device_t*) dev; + return 0; +} + +static struct hw_module_methods_t fingerprint_module_methods = { + .open = fingerprint_open, +}; + +fingerprint_module_t 
HAL_MODULE_INFO_SYM = { + .common = { + .tag = HARDWARE_MODULE_TAG, + .module_api_version = FINGERPRINT_MODULE_API_VERSION_1_0, + .hal_api_version = HARDWARE_HAL_API_VERSION, + .id = FINGERPRINT_HARDWARE_MODULE_ID, + .name = "Demo Fingerprint HAL", + .author = "The Android Open Source Project", + .methods = &fingerprint_module_methods, + }, +}; diff --git a/modules/tv_input/Android.mk b/modules/tv_input/Android.mk new file mode 100644 index 0000000..e8aa7fc --- /dev/null +++ b/modules/tv_input/Android.mk @@ -0,0 +1,24 @@ +# Copyright (C) 2014 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +LOCAL_PATH := $(call my-dir) + +include $(CLEAR_VARS) + +LOCAL_MODULE_RELATIVE_PATH := hw +LOCAL_SHARED_LIBRARIES := libcutils liblog +LOCAL_SRC_FILES := tv_input.cpp +LOCAL_MODULE := tv_input.default +LOCAL_MODULE_TAGS := optional +include $(BUILD_SHARED_LIBRARY) diff --git a/modules/tv_input/tv_input.cpp b/modules/tv_input/tv_input.cpp new file mode 100644 index 0000000..bc02786 --- /dev/null +++ b/modules/tv_input/tv_input.cpp @@ -0,0 +1,141 @@ +/* + * Copyright 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <fcntl.h> +#include <errno.h> + +#include <cutils/log.h> +#include <cutils/native_handle.h> + +#include <hardware/tv_input.h> + +/*****************************************************************************/ + +typedef struct tv_input_private { + tv_input_device_t device; + + // Callback related data + const tv_input_callback_ops_t* callback; + void* callback_data; +} tv_input_private_t; + +static int tv_input_device_open(const struct hw_module_t* module, + const char* name, struct hw_device_t** device); + +static struct hw_module_methods_t tv_input_module_methods = { + open: tv_input_device_open +}; + +tv_input_module_t HAL_MODULE_INFO_SYM = { + common: { + tag: HARDWARE_MODULE_TAG, + version_major: 0, + version_minor: 1, + id: TV_INPUT_HARDWARE_MODULE_ID, + name: "Sample TV input module", + author: "The Android Open Source Project", + methods: &tv_input_module_methods, + } +}; + +/*****************************************************************************/ + +static int tv_input_initialize(struct tv_input_device* dev, + const tv_input_callback_ops_t* callback, void* data) +{ + if (dev == NULL || callback == NULL) { + return -EINVAL; + } + tv_input_private_t* priv = (tv_input_private_t*)dev; + if (priv->callback != NULL) { + return -EEXIST; + } + + priv->callback = callback; + priv->callback_data = data; + + return 0; +} + +static int tv_input_get_stream_configurations( + const struct tv_input_device*, int, int*, const tv_stream_config_t**) +{ + return -EINVAL; +} + +static int tv_input_open_stream(struct tv_input_device*, int, 
tv_stream_t*) +{ + return -EINVAL; +} + +static int tv_input_close_stream(struct tv_input_device*, int, int) +{ + return -EINVAL; +} + +static int tv_input_request_capture( + struct tv_input_device*, int, int, buffer_handle_t, uint32_t) +{ + return -EINVAL; +} + +static int tv_input_cancel_capture(struct tv_input_device*, int, int, uint32_t) +{ + return -EINVAL; +} + +/*****************************************************************************/ + +static int tv_input_device_close(struct hw_device_t *dev) +{ + tv_input_private_t* priv = (tv_input_private_t*)dev; + if (priv) { + free(priv); + } + return 0; +} + +/*****************************************************************************/ + +static int tv_input_device_open(const struct hw_module_t* module, + const char* name, struct hw_device_t** device) +{ + int status = -EINVAL; + if (!strcmp(name, TV_INPUT_DEFAULT_DEVICE)) { + tv_input_private_t* dev = (tv_input_private_t*)malloc(sizeof(*dev)); + + /* initialize our state here */ + memset(dev, 0, sizeof(*dev)); + + /* initialize the procs */ + dev->device.common.tag = HARDWARE_DEVICE_TAG; + dev->device.common.version = TV_INPUT_DEVICE_API_VERSION_0_1; + dev->device.common.module = const_cast<hw_module_t*>(module); + dev->device.common.close = tv_input_device_close; + + dev->device.initialize = tv_input_initialize; + dev->device.get_stream_configurations = + tv_input_get_stream_configurations; + dev->device.open_stream = tv_input_open_stream; + dev->device.close_stream = tv_input_close_stream; + dev->device.request_capture = tv_input_request_capture; + dev->device.cancel_capture = tv_input_cancel_capture; + + *device = &dev->device.common; + status = 0; + } + return status; +} diff --git a/modules/usbaudio/audio_hw.c b/modules/usbaudio/audio_hw.c index 24a2d63..7b2a4f1 100644 --- a/modules/usbaudio/audio_hw.c +++ b/modules/usbaudio/audio_hw.c @@ -18,12 +18,13 @@ /*#define LOG_NDEBUG 0*/ #include <errno.h> +#include <inttypes.h> #include <pthread.h> #include 
<stdint.h> -#include <sys/time.h> #include <stdlib.h> +#include <sys/time.h> -#include <cutils/log.h> +#include <log/log.h> #include <cutils/str_parms.h> #include <cutils/properties.h> @@ -33,65 +34,398 @@ #include <tinyalsa/asoundlib.h> -struct pcm_config pcm_config = { +/* This is the default configuration to hand to The Framework on the initial + * adev_open_output_stream(). Actual device attributes will be used on the subsequent + * adev_open_output_stream() after the card and device number have been set in out_set_parameters() + */ +#define OUT_PERIOD_SIZE 1024 +#define OUT_PERIOD_COUNT 4 +#define OUT_SAMPLING_RATE 44100 + +struct pcm_config default_alsa_out_config = { + .channels = 2, + .rate = OUT_SAMPLING_RATE, + .period_size = OUT_PERIOD_SIZE, + .period_count = OUT_PERIOD_COUNT, + .format = PCM_FORMAT_S16_LE, +}; + +/* + * Input defaults. See comment above. + */ +#define IN_PERIOD_SIZE 1024 +#define IN_PERIOD_COUNT 4 +#define IN_SAMPLING_RATE 44100 + +struct pcm_config default_alsa_in_config = { .channels = 2, - .rate = 44100, - .period_size = 1024, - .period_count = 4, + .rate = IN_SAMPLING_RATE, + .period_size = IN_PERIOD_SIZE, + .period_count = IN_PERIOD_COUNT, .format = PCM_FORMAT_S16_LE, + .start_threshold = 1, + .stop_threshold = (IN_PERIOD_SIZE * IN_PERIOD_COUNT), }; struct audio_device { struct audio_hw_device hw_device; pthread_mutex_t lock; /* see note below on mutex acquisition order */ - int card; - int device; + + /* output */ + int out_card; + int out_device; + + /* input */ + int in_card; + int in_device; + bool standby; }; struct stream_out { struct audio_stream_out stream; + pthread_mutex_t lock; /* see note below on mutex acquisition order */ + struct pcm *pcm; /* state of the stream */ + bool standby; + + struct audio_device *dev; /* hardware information */ + + void * conversion_buffer; /* any conversions are put into here + * they could come from here too if + * there was a previous conversion */ + size_t conversion_buffer_size; /* in 
bytes */ +}; + +/* + * Output Configuration Cache + * FIXME(pmclean) This is not reentrant. Should probably be moved into the stream structure + * but that will involve changes in The Framework. + */ +static struct pcm_config cached_output_hardware_config; +static bool output_hardware_config_is_cached = false; + +struct stream_in { + struct audio_stream_in stream; + pthread_mutex_t lock; /* see note below on mutex acquisition order */ struct pcm *pcm; bool standby; struct audio_device *dev; + + struct audio_config hal_pcm_config; + +// struct resampler_itfe *resampler; +// struct resampler_buffer_provider buf_provider; + + int read_status; + + // We may need to read more data from the device in order to data reduce to 16bit, 4chan */ + void * conversion_buffer; /* any conversions are put into here + * they could come from here too if + * there was a previous conversion */ + size_t conversion_buffer_size; /* in bytes */ }; -/** - * NOTE: when multiple mutexes have to be acquired, always respect the - * following order: hw device > out stream +/* + * Input Configuration Cache + * FIXME(pmclean) This is not reentrant. Should probably be moved into the stream structure + * but that will involve changes in The Framework. */ +static struct pcm_config cached_input_hardware_config; +static bool input_hardware_config_is_cached = false; -/* Helper functions */ +/* + * Utility + */ +/* + * Translates from ALSA format ID to ANDROID_AUDIO_CORE format ID + * (see master/system/core/include/core/audio.h) + * TODO(pmclean) Replace with audio_format_from_pcm_format() (in hardware/audio_alsaops.h). + * post-integration. 
+ */ +static audio_format_t alsa_to_fw_format_id(int alsa_fmt_id) +{ + switch (alsa_fmt_id) { + case PCM_FORMAT_S8: + return AUDIO_FORMAT_PCM_8_BIT; -/* must be called with hw device and output stream mutexes locked */ -static int start_output_stream(struct stream_out *out) + case PCM_FORMAT_S24_3LE: + //TODO(pmclean) make sure this is the 'right' sort of 24-bit + return AUDIO_FORMAT_PCM_8_24_BIT; + + case PCM_FORMAT_S32_LE: + case PCM_FORMAT_S24_LE: + return AUDIO_FORMAT_PCM_32_BIT; + } + + return AUDIO_FORMAT_PCM_16_BIT; +} + +/* + * Data Conversions + */ +/* + * Convert a buffer of PCM16LE samples to packed (3-byte) PCM24LE samples. + * in_buff points to the buffer of PCM16 samples + * num_in_samples size of input buffer in SAMPLES + * out_buff points to the buffer to receive converted PCM24 LE samples. + * returns + * the number of BYTES of output data. + * We are doing this since we *always* present to The Framework as A PCM16LE device, but need to + * support PCM24_3LE (24-bit, packed). + * NOTE: + * We're just filling the low-order byte of the PCM24LE samples with 0. + * This conversion is safe to do in-place (in_buff == out_buff). + * TODO(pmclean, hung) Move this to a utilities module. + */ +static size_t convert_16_to_24_3(const short * in_buff, size_t num_in_samples, unsigned char * out_buff) { + /* + * Move from back to front so that the conversion can be done in-place + * i.e. 
in_buff == out_buff + */ + int in_buff_size_in_bytes = num_in_samples * 2; + /* we need 3 bytes in the output for every 2 bytes in the input */ + int out_buff_size_in_bytes = ((3 * in_buff_size_in_bytes) / 2); + unsigned char* dst_ptr = out_buff + out_buff_size_in_bytes - 1; + size_t src_smpl_index; + const unsigned char* src_ptr = ((const unsigned char *)in_buff) + in_buff_size_in_bytes - 1; + for (src_smpl_index = 0; src_smpl_index < num_in_samples; src_smpl_index++) { + *dst_ptr-- = *src_ptr--; /* hi-byte */ + *dst_ptr-- = *src_ptr--; /* low-byte */ + /*TODO(pmclean) - we might want to consider dithering the lowest byte. */ + *dst_ptr-- = 0; /* zero-byte */ + } + + /* return number of *bytes* generated */ + return out_buff_size_in_bytes; +} + +/* + * Convert a buffer of packed (3-byte) PCM24LE samples to PCM16LE samples. + * in_buff points to the buffer of PCM24LE samples + * num_in_samples size of input buffer in SAMPLES + * out_buff points to the buffer to receive converted PCM16LE LE samples. + * returns + * the number of BYTES of output data. + * We are doing this since we *always* present to The Framework as A PCM16LE device, but need to + * support PCM24_3LE (24-bit, packed). + * NOTE: + * We're just filling the low-order byte of the PCM24LE samples with 0. + * This conversion is safe to do in-place (in_buff == out_buff). + * TODO(pmclean, hung) Move this to a utilities module. + */ +static size_t convert_24_3_to_16(const unsigned char * in_buff, size_t num_in_samples, short * out_buff) { + /* + * Move from front to back so that the conversion can be done in-place + * i.e. 
in_buff == out_buff + */ + /* we need 2 bytes in the output for every 3 bytes in the input */ + unsigned char* dst_ptr = (unsigned char*)out_buff; + const unsigned char* src_ptr = in_buff; + size_t src_smpl_index; + for (src_smpl_index = 0; src_smpl_index < num_in_samples; src_smpl_index++) { + src_ptr++; /* lowest-(skip)-byte */ + *dst_ptr++ = *src_ptr++; /* low-byte */ + *dst_ptr++ = *src_ptr++; /* high-byte */ + } + + /* return number of *bytes* generated: */ + return num_in_samples * 2; +} + +/* + * Convert a buffer of N-channel, interleaved PCM16 samples to M-channel PCM16 channels + * (where N < M). + * in_buff points to the buffer of PCM16 samples + * in_buff_channels Specifies the number of channels in the input buffer. + * out_buff points to the buffer to receive converted PCM16 samples. + * out_buff_channels Specifies the number of channels in the output buffer. + * num_in_samples size of input buffer in SAMPLES + * returns + * the number of BYTES of output data. + * NOTE + * channels > N are filled with silence. + * This conversion is safe to do in-place (in_buff == out_buff) + * We are doing this since we *always* present to The Framework as STEREO device, but need to + * support 4-channel devices. + * TODO(pmclean, hung) Move this to a utilities module. + */ +static size_t expand_channels_16(const short* in_buff, int in_buff_chans, + short* out_buff, int out_buff_chans, + size_t num_in_samples) { + /* + * Move from back to front so that the conversion can be done in-place + * i.e. 
in_buff == out_buff + * NOTE: num_in_samples * out_buff_channels must be an even multiple of in_buff_chans + */ + int num_out_samples = (num_in_samples * out_buff_chans)/in_buff_chans; + + short* dst_ptr = out_buff + num_out_samples - 1; + size_t src_index; + const short* src_ptr = in_buff + num_in_samples - 1; + int num_zero_chans = out_buff_chans - in_buff_chans; + for (src_index = 0; src_index < num_in_samples; src_index += in_buff_chans) { + int dst_offset; + for(dst_offset = 0; dst_offset < num_zero_chans; dst_offset++) { + *dst_ptr-- = 0; + } + for(; dst_offset < out_buff_chans; dst_offset++) { + *dst_ptr-- = *src_ptr--; + } + } + + /* return number of *bytes* generated */ + return num_out_samples * sizeof(short); +} + +/* + * Convert a buffer of N-channel, interleaved PCM16 samples to M-channel PCM16 channels + * (where N > M). + * in_buff points to the buffer of PCM16 samples + * in_buff_channels Specifies the number of channels in the input buffer. + * out_buff points to the buffer to receive converted PCM16 samples. + * out_buff_channels Specifies the number of channels in the output buffer. + * num_in_samples size of input buffer in SAMPLES + * returns + * the number of BYTES of output data. + * NOTE + * channels > N are thrown away. + * This conversion is safe to do in-place (in_buff == out_buff) + * We are doing this since we *always* present to The Framework as STEREO device, but need to + * support 4-channel devices. + * TODO(pmclean, hung) Move this to a utilities module. + */ +static size_t contract_channels_16(short* in_buff, int in_buff_chans, + short* out_buff, int out_buff_chans, + size_t num_in_samples) { + /* + * Move from front to back so that the conversion can be done in-place + * i.e. 
in_buff == out_buff + * NOTE: num_in_samples * out_buff_channels must be an even multiple of in_buff_chans + */ + int num_out_samples = (num_in_samples * out_buff_chans)/in_buff_chans; + + int num_skip_samples = in_buff_chans - out_buff_chans; + + short* dst_ptr = out_buff; + short* src_ptr = in_buff; + size_t src_index; + for (src_index = 0; src_index < num_in_samples; src_index += in_buff_chans) { + int dst_offset; + for(dst_offset = 0; dst_offset < out_buff_chans; dst_offset++) { + *dst_ptr++ = *src_ptr++; + } + src_ptr += num_skip_samples; + } + + /* return number of *bytes* generated */ + return num_out_samples * sizeof(short); +} + +/* + * ALSA Utilities + */ +/* + * gets the ALSA bit-format flag from a bits-per-sample value. + * TODO(pmclean, hung) Move this to a utilities module. + */ +static int bits_to_alsa_format(unsigned int bits_per_sample, int default_format) { - struct audio_device *adev = out->dev; - int i; + enum pcm_format format; + for (format = PCM_FORMAT_S16_LE; format < PCM_FORMAT_MAX; format++) { + if (pcm_format_to_bits(format) == bits_per_sample) { + return format; + } + } + return default_format; +} - if ((adev->card < 0) || (adev->device < 0)) - return -EINVAL; +static void log_pcm_params(struct pcm_params * alsa_hw_params) { + ALOGV("usb:audio_hw - PCM_PARAM_SAMPLE_BITS min:%u, max:%u", + pcm_params_get_min(alsa_hw_params, PCM_PARAM_SAMPLE_BITS), + pcm_params_get_max(alsa_hw_params, PCM_PARAM_SAMPLE_BITS)); + ALOGV("usb:audio_hw - PCM_PARAM_FRAME_BITS min:%u, max:%u", + pcm_params_get_min(alsa_hw_params, PCM_PARAM_FRAME_BITS), + pcm_params_get_max(alsa_hw_params, PCM_PARAM_FRAME_BITS)); + ALOGV("usb:audio_hw - PCM_PARAM_CHANNELS min:%u, max:%u", + pcm_params_get_min(alsa_hw_params, PCM_PARAM_CHANNELS), + pcm_params_get_max(alsa_hw_params, PCM_PARAM_CHANNELS)); + ALOGV("usb:audio_hw - PCM_PARAM_RATE min:%u, max:%u", + pcm_params_get_min(alsa_hw_params, PCM_PARAM_RATE), + pcm_params_get_max(alsa_hw_params, PCM_PARAM_RATE)); + 
ALOGV("usb:audio_hw - PCM_PARAM_PERIOD_TIME min:%u, max:%u", + pcm_params_get_min(alsa_hw_params, PCM_PARAM_PERIOD_TIME), + pcm_params_get_max(alsa_hw_params, PCM_PARAM_PERIOD_TIME)); + ALOGV("usb:audio_hw - PCM_PARAM_PERIOD_SIZE min:%u, max:%u", + pcm_params_get_min(alsa_hw_params, PCM_PARAM_PERIOD_SIZE), + pcm_params_get_max(alsa_hw_params, PCM_PARAM_PERIOD_SIZE)); + ALOGV("usb:audio_hw - PCM_PARAM_PERIOD_BYTES min:%u, max:%u", + pcm_params_get_min(alsa_hw_params, PCM_PARAM_PERIOD_BYTES), + pcm_params_get_max(alsa_hw_params, PCM_PARAM_PERIOD_BYTES)); + ALOGV("usb:audio_hw - PCM_PARAM_PERIODS min:%u, max:%u", + pcm_params_get_min(alsa_hw_params, PCM_PARAM_PERIODS), + pcm_params_get_max(alsa_hw_params, PCM_PARAM_PERIODS)); + ALOGV("usb:audio_hw - PCM_PARAM_BUFFER_TIME min:%u, max:%u", + pcm_params_get_min(alsa_hw_params, PCM_PARAM_BUFFER_TIME), + pcm_params_get_max(alsa_hw_params, PCM_PARAM_BUFFER_TIME)); + ALOGV("usb:audio_hw - PCM_PARAM_BUFFER_SIZE min:%u, max:%u", + pcm_params_get_min(alsa_hw_params, PCM_PARAM_BUFFER_SIZE), + pcm_params_get_max(alsa_hw_params, PCM_PARAM_BUFFER_SIZE)); + ALOGV("usb:audio_hw - PCM_PARAM_BUFFER_BYTES min:%u, max:%u", + pcm_params_get_min(alsa_hw_params, PCM_PARAM_BUFFER_BYTES), + pcm_params_get_max(alsa_hw_params, PCM_PARAM_BUFFER_BYTES)); + ALOGV("usb:audio_hw - PCM_PARAM_TICK_TIME min:%u, max:%u", + pcm_params_get_min(alsa_hw_params, PCM_PARAM_TICK_TIME), + pcm_params_get_max(alsa_hw_params, PCM_PARAM_TICK_TIME)); +} - out->pcm = pcm_open(adev->card, adev->device, PCM_OUT, &pcm_config); +/* + * Reads and decodes configuration info from the specified ALSA card/device + */ +static int read_alsa_device_config(int card, int device, int io_type, struct pcm_config * config) +{ + ALOGV("usb:audio_hw - read_alsa_device_config(c:%d d:%d t:0x%X)",card, device, io_type); - if (out->pcm && !pcm_is_ready(out->pcm)) { - ALOGE("pcm_open() failed: %s", pcm_get_error(out->pcm)); - pcm_close(out->pcm); - return -ENOMEM; + if (card < 0 || device < 
0) { + return -EINVAL; + } + + struct pcm_params * alsa_hw_params = pcm_params_get(card, device, io_type); + if (alsa_hw_params == NULL) { + return -EINVAL; } + /* + * This Logging will be useful when testing new USB devices. + */ + /* log_pcm_params(alsa_hw_params); */ + + config->channels = pcm_params_get_min(alsa_hw_params, PCM_PARAM_CHANNELS); + config->rate = pcm_params_get_min(alsa_hw_params, PCM_PARAM_RATE); + config->period_size = pcm_params_get_max(alsa_hw_params, PCM_PARAM_PERIODS); + config->period_count = pcm_params_get_min(alsa_hw_params, PCM_PARAM_PERIODS); + + unsigned int bits_per_sample = pcm_params_get_min(alsa_hw_params, PCM_PARAM_SAMPLE_BITS); + config->format = bits_to_alsa_format(bits_per_sample, PCM_FORMAT_S16_LE); + return 0; } -/* API functions */ +/* + * HAl Functions + */ +/** + * NOTE: when multiple mutexes have to be acquired, always respect the + * following order: hw device > out stream + */ +/* Helper functions */ static uint32_t out_get_sample_rate(const struct audio_stream *stream) { - return pcm_config.rate; + return cached_output_hardware_config.rate; } static int out_set_sample_rate(struct audio_stream *stream, uint32_t rate) @@ -101,17 +435,22 @@ static int out_set_sample_rate(struct audio_stream *stream, uint32_t rate) static size_t out_get_buffer_size(const struct audio_stream *stream) { - return pcm_config.period_size * - audio_stream_frame_size((struct audio_stream *)stream); + return cached_output_hardware_config.period_size * audio_stream_frame_size(stream); } static uint32_t out_get_channels(const struct audio_stream *stream) { + // Always Stero for now. We will do *some* conversions in this HAL. + // TODO(pmclean) When AudioPolicyManager & AudioFlinger supports arbitrary channels + // rewrite this to return the ACTUAL channel format return AUDIO_CHANNEL_OUT_STEREO; } static audio_format_t out_get_format(const struct audio_stream *stream) { + // Always return 16-bit PCM. We will do *some* conversions in this HAL. 
+ // TODO(pmclean) When AudioPolicyManager & AudioFlinger supports arbitrary PCM formats + // rewrite this to return the ACTUAL data format return AUDIO_FORMAT_PCM_16_BIT; } @@ -146,49 +485,158 @@ static int out_dump(const struct audio_stream *stream, int fd) static int out_set_parameters(struct audio_stream *stream, const char *kvpairs) { + ALOGV("usb:audio_hw::out out_set_parameters() keys:%s", kvpairs); + struct stream_out *out = (struct stream_out *)stream; struct audio_device *adev = out->dev; struct str_parms *parms; char value[32]; - int ret; + int param_val; int routing = 0; + int ret_value = 0; parms = str_parms_create_str(kvpairs); pthread_mutex_lock(&adev->lock); - ret = str_parms_get_str(parms, "card", value, sizeof(value)); - if (ret >= 0) - adev->card = atoi(value); + bool recache_device_params = false; + param_val = str_parms_get_str(parms, "card", value, sizeof(value)); + if (param_val >= 0) { + adev->out_card = atoi(value); + recache_device_params = true; + } + + param_val = str_parms_get_str(parms, "device", value, sizeof(value)); + if (param_val >= 0) { + adev->out_device = atoi(value); + recache_device_params = true; + } - ret = str_parms_get_str(parms, "device", value, sizeof(value)); - if (ret >= 0) - adev->device = atoi(value); + if (recache_device_params && adev->out_card >= 0 && adev->out_device >= 0) { + ret_value = read_alsa_device_config(adev->out_card, adev->out_device, PCM_OUT, + &cached_output_hardware_config); + output_hardware_config_is_cached = (ret_value == 0); + } pthread_mutex_unlock(&adev->lock); str_parms_destroy(parms); - return 0; + return ret_value; } +//TODO(pmclean) it seems like both out_get_parameters() and in_get_parameters() +// could be written in terms of a get_device_parameters(io_type) + static char * out_get_parameters(const struct audio_stream *stream, const char *keys) { - return strdup(""); + ALOGV("usb:audio_hw::out out_get_parameters() keys:%s", keys); + + struct stream_out *out = (struct stream_out *) 
stream; + struct audio_device *adev = out->dev; + + if (adev->out_card < 0 || adev->out_device < 0) + return strdup(""); + + unsigned min, max; + + struct str_parms *query = str_parms_create_str(keys); + struct str_parms *result = str_parms_create(); + + int num_written = 0; + char buffer[256]; + int buffer_size = sizeof(buffer) / sizeof(buffer[0]); + char* result_str = NULL; + + struct pcm_params * alsa_hw_params = pcm_params_get(adev->out_card, adev->out_device, PCM_OUT); + + // These keys are from hardware/libhardware/include/audio.h + // supported sample rates + if (str_parms_has_key(query, AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES)) { + // pcm_hw_params doesn't have a list of supported samples rates, just a min and a max, so + // if they are different, return a list containing those two values, otherwise just the one. + min = pcm_params_get_min(alsa_hw_params, PCM_PARAM_RATE); + max = pcm_params_get_max(alsa_hw_params, PCM_PARAM_RATE); + num_written = snprintf(buffer, buffer_size, "%u", min); + if (min != max) { + snprintf(buffer + num_written, buffer_size - num_written, "|%u", max); + } + str_parms_add_str(result, AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES, + buffer); + } // AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES + + // supported channel counts + if (str_parms_has_key(query, AUDIO_PARAMETER_STREAM_SUP_CHANNELS)) { + // Similarly for output channels count + min = pcm_params_get_min(alsa_hw_params, PCM_PARAM_CHANNELS); + max = pcm_params_get_max(alsa_hw_params, PCM_PARAM_CHANNELS); + num_written = snprintf(buffer, buffer_size, "%u", min); + if (min != max) { + snprintf(buffer + num_written, buffer_size - num_written, "|%u", max); + } + str_parms_add_str(result, AUDIO_PARAMETER_STREAM_SUP_CHANNELS, buffer); + } // AUDIO_PARAMETER_STREAM_SUP_CHANNELS + + // supported sample formats + if (str_parms_has_key(query, AUDIO_PARAMETER_STREAM_SUP_FORMATS)) { + // Similarly for output channels count + //TODO(pmclean): this is wrong. 
+ min = pcm_params_get_min(alsa_hw_params, PCM_PARAM_SAMPLE_BITS); + max = pcm_params_get_max(alsa_hw_params, PCM_PARAM_SAMPLE_BITS); + num_written = snprintf(buffer, buffer_size, "%u", min); + if (min != max) { + snprintf(buffer + num_written, buffer_size - num_written, "|%u", max); + } + str_parms_add_str(result, AUDIO_PARAMETER_STREAM_SUP_FORMATS, buffer); + } // AUDIO_PARAMETER_STREAM_SUP_FORMATS + + result_str = str_parms_to_str(result); + + // done with these... + str_parms_destroy(query); + str_parms_destroy(result); + + return result_str; } static uint32_t out_get_latency(const struct audio_stream_out *stream) { - return (pcm_config.period_size * pcm_config.period_count * 1000) / - out_get_sample_rate(&stream->common); + struct stream_out *out = (struct stream_out *) stream; + + //TODO(pmclean): Do we need a term here for the USB latency + // (as reported in the USB descriptors)? + uint32_t latency = (cached_output_hardware_config.period_size + * cached_output_hardware_config.period_count * 1000) / out_get_sample_rate(&stream->common); + return latency; } -static int out_set_volume(struct audio_stream_out *stream, float left, - float right) +static int out_set_volume(struct audio_stream_out *stream, float left, float right) { return -ENOSYS; } -static ssize_t out_write(struct audio_stream_out *stream, const void* buffer, - size_t bytes) +/* must be called with hw device and output stream mutexes locked */ +static int start_output_stream(struct stream_out *out) +{ + struct audio_device *adev = out->dev; + int return_val = 0; + + ALOGV("usb:audio_hw::out start_output_stream(card:%d device:%d)", + adev->out_card, adev->out_device); + + out->pcm = pcm_open(adev->out_card, adev->out_device, PCM_OUT, &cached_output_hardware_config); + if (out->pcm == NULL) { + return -ENOMEM; + } + + if (out->pcm && !pcm_is_ready(out->pcm)) { + ALOGE("audio_hw audio_hw pcm_open() failed: %s", pcm_get_error(out->pcm)); + pcm_close(out->pcm); + return -ENOMEM; + } + + return 0; +} 
+ +static ssize_t out_write(struct audio_stream_out *stream, const void* buffer, size_t bytes) { int ret; struct stream_out *out = (struct stream_out *)stream; @@ -203,7 +651,59 @@ static ssize_t out_write(struct audio_stream_out *stream, const void* buffer, out->standby = false; } - pcm_write(out->pcm, (void *)buffer, bytes); + // Setup conversion buffer + // compute maximum potential buffer size. + // * 2 for stereo -> quad conversion + // * 3/2 for 16bit -> 24 bit conversion + size_t required_conversion_buffer_size = (bytes * 3 * 2) / 2; + if (required_conversion_buffer_size > out->conversion_buffer_size) { + //TODO(pmclean) - remove this when AudioPolicyManger/AudioFlinger support arbitrary formats + // (and do these conversions themselves) + out->conversion_buffer_size = required_conversion_buffer_size; + out->conversion_buffer = realloc(out->conversion_buffer, out->conversion_buffer_size); + } + + const void * write_buff = buffer; + int num_write_buff_bytes = bytes; + + /* + * Num Channels conversion + */ + int num_device_channels = cached_output_hardware_config.channels; + int num_req_channels = 2; /* always, for now */ + if (num_device_channels != num_req_channels) { + num_write_buff_bytes = + expand_channels_16(write_buff, num_req_channels, + out->conversion_buffer, num_device_channels, + num_write_buff_bytes / sizeof(short)); + write_buff = out->conversion_buffer; + } + + /* + * 16 vs 24-bit logic here + */ + switch (cached_output_hardware_config.format) { + case PCM_FORMAT_S16_LE: + // the output format is the same as the input format, so just write it out + break; + + case PCM_FORMAT_S24_3LE: + // 16-bit LE2 - 24-bit LE3 + num_write_buff_bytes = convert_16_to_24_3(write_buff, + num_write_buff_bytes / sizeof(short), + out->conversion_buffer); + write_buff = out->conversion_buffer; + break; + + default: + // hmmmmm..... 
+ ALOGV("usb:Unknown Format!!!"); + break; + } + + if (write_buff != NULL && num_write_buff_bytes != 0) { + pcm_write(out->pcm, write_buff, num_write_buff_bytes); + } pthread_mutex_unlock(&out->lock); pthread_mutex_unlock(&out->dev->lock); @@ -221,8 +721,7 @@ err: return bytes; } -static int out_get_render_position(const struct audio_stream_out *stream, - uint32_t *dsp_frames) +static int out_get_render_position(const struct audio_stream_out *stream, uint32_t *dsp_frames) { return -EINVAL; } @@ -237,8 +736,7 @@ static int out_remove_audio_effect(const struct audio_stream *stream, effect_han return 0; } -static int out_get_next_write_timestamp(const struct audio_stream_out *stream, - int64_t *timestamp) +static int out_get_next_write_timestamp(const struct audio_stream_out *stream, int64_t *timestamp) { return -EINVAL; } @@ -250,14 +748,18 @@ static int adev_open_output_stream(struct audio_hw_device *dev, struct audio_config *config, struct audio_stream_out **stream_out) { + ALOGV("usb:audio_hw::out adev_open_output_stream() handle:0x%X, device:0x%X, flags:0x%X", + handle, devices, flags); + struct audio_device *adev = (struct audio_device *)dev; + struct stream_out *out; - int ret; out = (struct stream_out *)calloc(1, sizeof(struct stream_out)); if (!out) return -ENOMEM; + // setup function pointers out->stream.common.get_sample_rate = out_get_sample_rate; out->stream.common.set_sample_rate = out_set_sample_rate; out->stream.common.get_buffer_size = out_get_buffer_size; @@ -278,14 +780,37 @@ static int adev_open_output_stream(struct audio_hw_device *dev, out->dev = adev; - config->format = out_get_format(&out->stream.common); - config->channel_mask = out_get_channels(&out->stream.common); - config->sample_rate = out_get_sample_rate(&out->stream.common); + if (output_hardware_config_is_cached) { + config->sample_rate = cached_output_hardware_config.rate; - out->standby = true; + config->format = alsa_to_fw_format_id(cached_output_hardware_config.format); + if 
(config->format != AUDIO_FORMAT_PCM_16_BIT) { + // Always report PCM16 for now. AudioPolicyManagerBase/AudioFlinger dont' understand + // formats with more other format, so we won't get chosen (say with a 24bit DAC). + //TODO(pmclean) remove this when the above restriction is removed. + config->format = AUDIO_FORMAT_PCM_16_BIT; + } + + config->channel_mask = + audio_channel_out_mask_from_count(cached_output_hardware_config.channels); + if (config->channel_mask != AUDIO_CHANNEL_OUT_STEREO) { + // Always report STEREO for now. AudioPolicyManagerBase/AudioFlinger dont' understand + // formats with more channels, so we won't get chosen (say with a 4-channel DAC). + //TODO(pmclean) remove this when the above restriction is removed. + config->channel_mask = AUDIO_CHANNEL_OUT_STEREO; + } + } else { + cached_output_hardware_config = default_alsa_out_config; + + config->format = out_get_format(&out->stream.common); + config->channel_mask = out_get_channels(&out->stream.common); + config->sample_rate = out_get_sample_rate(&out->stream.common); + } - adev->card = -1; - adev->device = -1; + out->conversion_buffer = NULL; + out->conversion_buffer_size = 0; + + out->standby = true; *stream_out = &out->stream; return 0; @@ -293,15 +818,23 @@ static int adev_open_output_stream(struct audio_hw_device *dev, err_open: free(out); *stream_out = NULL; - return ret; + return -ENOSYS; } static void adev_close_output_stream(struct audio_hw_device *dev, struct audio_stream_out *stream) { + ALOGV("usb:audio_hw::out adev_close_output_stream()"); struct stream_out *out = (struct stream_out *)stream; + //TODO(pmclean) why are we doing this when stream get's freed at the end + // because it closes the pcm device out_standby(&stream->common); + + free(out->conversion_buffer); + out->conversion_buffer = NULL; + out->conversion_buffer_size = 0; + free(stream); } @@ -310,8 +843,7 @@ static int adev_set_parameters(struct audio_hw_device *dev, const char *kvpairs) return 0; } -static char * 
adev_get_parameters(const struct audio_hw_device *dev, - const char *keys) +static char * adev_get_parameters(const struct audio_hw_device *dev, const char *keys) { return strdup(""); } @@ -352,18 +884,390 @@ static size_t adev_get_input_buffer_size(const struct audio_hw_device *dev, return 0; } +/* Helper functions */ +static uint32_t in_get_sample_rate(const struct audio_stream *stream) +{ + return cached_input_hardware_config.rate; +} + +static int in_set_sample_rate(struct audio_stream *stream, uint32_t rate) +{ + return -ENOSYS; +} + +static size_t in_get_buffer_size(const struct audio_stream *stream) +{ + ALOGV("usb: in_get_buffer_size() = %zu", + cached_input_hardware_config.period_size * audio_stream_frame_size(stream)); + return cached_input_hardware_config.period_size * audio_stream_frame_size(stream); + +} + +static uint32_t in_get_channels(const struct audio_stream *stream) +{ + // just report stereo for now + return AUDIO_CHANNEL_IN_STEREO; +} + +static audio_format_t in_get_format(const struct audio_stream *stream) +{ + // just report 16-bit, pcm for now. 
+ return AUDIO_FORMAT_PCM_16_BIT; +} + +static int in_set_format(struct audio_stream *stream, audio_format_t format) +{ + return -ENOSYS; +} + +static int in_standby(struct audio_stream *stream) +{ + struct stream_in *in = (struct stream_in *) stream; + + pthread_mutex_lock(&in->dev->lock); + pthread_mutex_lock(&in->lock); + + if (!in->standby) { + pcm_close(in->pcm); + in->pcm = NULL; + in->standby = true; + } + + pthread_mutex_unlock(&in->lock); + pthread_mutex_unlock(&in->dev->lock); + + return 0; +} + +static int in_dump(const struct audio_stream *stream, int fd) +{ + return 0; +} + +static int in_set_parameters(struct audio_stream *stream, const char *kvpairs) +{ + ALOGV("usb: audio_hw::in in_set_parameters() keys:%s", kvpairs); + + struct stream_in *in = (struct stream_in *)stream; + struct audio_device *adev = in->dev; + struct str_parms *parms; + char value[32]; + int param_val; + int routing = 0; + int ret_value = 0; + + parms = str_parms_create_str(kvpairs); + pthread_mutex_lock(&adev->lock); + + bool recache_device_params = false; + + // Card/Device + param_val = str_parms_get_str(parms, "card", value, sizeof(value)); + if (param_val >= 0) { + adev->in_card = atoi(value); + recache_device_params = true; + } + + param_val = str_parms_get_str(parms, "device", value, sizeof(value)); + if (param_val >= 0) { + adev->in_device = atoi(value); + recache_device_params = true; + } + + if (recache_device_params && adev->in_card >= 0 && adev->in_device >= 0) { + ret_value = read_alsa_device_config(adev->in_card, adev->in_device, + PCM_IN, &(cached_input_hardware_config)); + input_hardware_config_is_cached = (ret_value == 0); + } + + pthread_mutex_unlock(&adev->lock); + str_parms_destroy(parms); + + return ret_value; +} + +//TODO(pmclean) it seems like both out_get_parameters() and in_get_parameters() +// could be written in terms of a get_device_parameters(io_type) + +static char * in_get_parameters(const struct audio_stream *stream, const char *keys) { + 
ALOGV("usb:audio_hw::in in_get_parameters() keys:%s", keys); + + struct stream_in *in = (struct stream_in *)stream; + struct audio_device *adev = in->dev; + + if (adev->in_card < 0 || adev->in_device < 0) + return strdup(""); + + struct pcm_params * alsa_hw_params = pcm_params_get(adev->in_card, adev->in_device, PCM_IN); + if (alsa_hw_params == NULL) + return strdup(""); + + struct str_parms *query = str_parms_create_str(keys); + struct str_parms *result = str_parms_create(); + + int num_written = 0; + char buffer[256]; + int buffer_size = sizeof(buffer) / sizeof(buffer[0]); + char* result_str = NULL; + + unsigned min, max; + + // These keys are from hardware/libhardware/include/audio.h + // supported sample rates + if (str_parms_has_key(query, AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES)) { + // pcm_hw_params doesn't have a list of supported samples rates, just a min and a max, so + // if they are different, return a list containing those two values, otherwise just the one. + min = pcm_params_get_min(alsa_hw_params, PCM_PARAM_RATE); + max = pcm_params_get_max(alsa_hw_params, PCM_PARAM_RATE); + num_written = snprintf(buffer, buffer_size, "%u", min); + if (min != max) { + snprintf(buffer + num_written, buffer_size - num_written, "|%u", max); + } + str_parms_add_str(result, AUDIO_PARAMETER_STREAM_SAMPLING_RATE, buffer); + } // AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES + + // supported channel counts + if (str_parms_has_key(query, AUDIO_PARAMETER_STREAM_SUP_CHANNELS)) { + // Similarly for output channels count + min = pcm_params_get_min(alsa_hw_params, PCM_PARAM_CHANNELS); + max = pcm_params_get_max(alsa_hw_params, PCM_PARAM_CHANNELS); + num_written = snprintf(buffer, buffer_size, "%u", min); + if (min != max) { + snprintf(buffer + num_written, buffer_size - num_written, "|%u", max); + } + str_parms_add_str(result, AUDIO_PARAMETER_STREAM_CHANNELS, buffer); + } // AUDIO_PARAMETER_STREAM_SUP_CHANNELS + + // supported sample formats + if (str_parms_has_key(query, 
AUDIO_PARAMETER_STREAM_SUP_FORMATS)) { + //TODO(pmclean): this is wrong. + min = pcm_params_get_min(alsa_hw_params, PCM_PARAM_SAMPLE_BITS); + max = pcm_params_get_max(alsa_hw_params, PCM_PARAM_SAMPLE_BITS); + num_written = snprintf(buffer, buffer_size, "%u", min); + if (min != max) { + snprintf(buffer + num_written, buffer_size - num_written, "|%u", max); + } + str_parms_add_str(result, AUDIO_PARAMETER_STREAM_SUP_FORMATS, buffer); + } // AUDIO_PARAMETER_STREAM_SUP_FORMATS + + result_str = str_parms_to_str(result); + + // done with these... + str_parms_destroy(query); + str_parms_destroy(result); + + return result_str; +} + +static int in_add_audio_effect(const struct audio_stream *stream, effect_handle_t effect) +{ + return 0; +} + +static int in_remove_audio_effect(const struct audio_stream *stream, effect_handle_t effect) +{ + return 0; +} + +static int in_set_gain(struct audio_stream_in *stream, float gain) +{ + return 0; +} + +/* must be called with hw device and output stream mutexes locked */ +static int start_input_stream(struct stream_in *in) { + struct audio_device *adev = in->dev; + int return_val = 0; + + ALOGV("usb:audio_hw::start_input_stream(card:%d device:%d)", + adev->in_card, adev->in_device); + + in->pcm = pcm_open(adev->in_card, adev->in_device, PCM_IN, &cached_input_hardware_config); + if (in->pcm == NULL) { + ALOGE("usb:audio_hw pcm_open() in->pcm == NULL"); + return -ENOMEM; + } + + if (in->pcm && !pcm_is_ready(in->pcm)) { + ALOGE("usb:audio_hw audio_hw pcm_open() failed: %s", pcm_get_error(in->pcm)); + pcm_close(in->pcm); + return -ENOMEM; + } + + return 0; +} + +//TODO(pmclean) mutex stuff here (see out_write) +static ssize_t in_read(struct audio_stream_in *stream, void* buffer, size_t bytes) +{ + size_t num_read_buff_bytes = 0; + void * read_buff = buffer; + void * out_buff = buffer; + + struct stream_in * in = (struct stream_in *) stream; + + ALOGV("usb: in_read(%d)", bytes); + + pthread_mutex_lock(&in->dev->lock); + 
pthread_mutex_lock(&in->lock); + + if (in->standby) { + if (start_input_stream(in) != 0) { + goto err; + } + in->standby = false; + } + + // OK, we need to figure out how much data to read to be able to output the requested + // number of bytes in the HAL format (16-bit, stereo). + num_read_buff_bytes = bytes; + int num_device_channels = cached_input_hardware_config.channels; + int num_req_channels = 2; /* always, for now */ + + if (num_device_channels != num_req_channels) { + num_read_buff_bytes = (num_device_channels * num_read_buff_bytes) / num_req_channels; + } + + if (cached_output_hardware_config.format == PCM_FORMAT_S24_3LE) { + num_read_buff_bytes = (3 * num_read_buff_bytes) / 2; + } + + // Setup/Realloc the conversion buffer (if necessary). + if (num_read_buff_bytes != bytes) { + if (num_read_buff_bytes > in->conversion_buffer_size) { + //TODO(pmclean) - remove this when AudioPolicyManger/AudioFlinger support arbitrary formats + // (and do these conversions themselves) + in->conversion_buffer_size = num_read_buff_bytes; + in->conversion_buffer = realloc(in->conversion_buffer, in->conversion_buffer_size); + } + read_buff = in->conversion_buffer; + } + + if (pcm_read(in->pcm, read_buff, num_read_buff_bytes) == 0) { + /* + * Do any conversions necessary to send the data in the format specified to/by the HAL + * (but different from the ALSA format), such as 24bit ->16bit, or 4chan -> 2chan. 
+ */ + if (cached_output_hardware_config.format == PCM_FORMAT_S24_3LE) { + if (num_device_channels != num_req_channels) { + out_buff = read_buff; + } + + /* Bit Format Conversion */ + num_read_buff_bytes = + convert_24_3_to_16(read_buff, num_read_buff_bytes / 3, out_buff); + } + + if (num_device_channels != num_req_channels) { + out_buff = buffer; + /* Num Channels conversion */ + if (num_device_channels < num_req_channels) { + num_read_buff_bytes = + contract_channels_16(read_buff, num_device_channels, + out_buff, num_req_channels, + num_read_buff_bytes / sizeof(short)); + } else { + num_read_buff_bytes = + expand_channels_16(read_buff, num_device_channels, + out_buff, num_req_channels, + num_read_buff_bytes / sizeof(short)); + } + } + } + +err: + pthread_mutex_unlock(&in->lock); + pthread_mutex_unlock(&in->dev->lock); + + return num_read_buff_bytes; +} + +static uint32_t in_get_input_frames_lost(struct audio_stream_in *stream) +{ + return 0; +} + static int adev_open_input_stream(struct audio_hw_device *dev, audio_io_handle_t handle, audio_devices_t devices, struct audio_config *config, struct audio_stream_in **stream_in) { - return -ENOSYS; + ALOGV("usb: in adev_open_input_stream() rate:%" PRIu32 ", chanMask:0x%" PRIX32 ", fmt:%" PRIu8, + config->sample_rate, config->channel_mask, config->format); + + struct stream_in *in = (struct stream_in *)calloc(1, sizeof(struct stream_in)); + if (in == NULL) + return -ENOMEM; + + // setup function pointers + in->stream.common.get_sample_rate = in_get_sample_rate; + in->stream.common.set_sample_rate = in_set_sample_rate; + in->stream.common.get_buffer_size = in_get_buffer_size; + in->stream.common.get_channels = in_get_channels; + in->stream.common.get_format = in_get_format; + in->stream.common.set_format = in_set_format; + in->stream.common.standby = in_standby; + in->stream.common.dump = in_dump; + in->stream.common.set_parameters = in_set_parameters; + in->stream.common.get_parameters = in_get_parameters; + 
in->stream.common.add_audio_effect = in_add_audio_effect; + in->stream.common.remove_audio_effect = in_remove_audio_effect; + + in->stream.set_gain = in_set_gain; + in->stream.read = in_read; + in->stream.get_input_frames_lost = in_get_input_frames_lost; + + in->dev = (struct audio_device *)dev; + + if (output_hardware_config_is_cached) { + config->sample_rate = cached_output_hardware_config.rate; + + config->format = alsa_to_fw_format_id(cached_output_hardware_config.format); + if (config->format != AUDIO_FORMAT_PCM_16_BIT) { + // Always report PCM16 for now. AudioPolicyManagerBase/AudioFlinger dont' understand + // formats with more other format, so we won't get chosen (say with a 24bit DAC). + //TODO(pmclean) remove this when the above restriction is removed. + config->format = AUDIO_FORMAT_PCM_16_BIT; + } + + config->channel_mask = audio_channel_out_mask_from_count( + cached_output_hardware_config.channels); + if (config->channel_mask != AUDIO_CHANNEL_OUT_STEREO) { + // Always report STEREO for now. AudioPolicyManagerBase/AudioFlinger dont' understand + // formats with more channels, so we won't get chosen (say with a 4-channel DAC). + //TODO(pmclean) remove this when the above restriction is removed. 
+ config->channel_mask = AUDIO_CHANNEL_OUT_STEREO; + } + } else { + cached_input_hardware_config = default_alsa_in_config; + + config->format = out_get_format(&in->stream.common); + config->channel_mask = out_get_channels(&in->stream.common); + config->sample_rate = out_get_sample_rate(&in->stream.common); + } + + in->standby = true; + + in->conversion_buffer = NULL; + in->conversion_buffer_size = 0; + + *stream_in = &in->stream; + + return 0; } -static void adev_close_input_stream(struct audio_hw_device *dev, - struct audio_stream_in *stream) +static void adev_close_input_stream(struct audio_hw_device *dev, struct audio_stream_in *stream) { + struct stream_in *in = (struct stream_in *)stream; + + //TODO(pmclean) why are we doing this when stream get's freed at the end + // because it closes the pcm device + in_standby(&stream->common); + + free(in->conversion_buffer); + + free(stream); } static int adev_dump(const audio_hw_device_t *device, int fd) @@ -374,21 +1278,20 @@ static int adev_dump(const audio_hw_device_t *device, int fd) static int adev_close(hw_device_t *device) { struct audio_device *adev = (struct audio_device *)device; - free(device); + + output_hardware_config_is_cached = false; + input_hardware_config_is_cached = false; + return 0; } -static int adev_open(const hw_module_t* module, const char* name, - hw_device_t** device) +static int adev_open(const hw_module_t* module, const char* name, hw_device_t** device) { - struct audio_device *adev; - int ret; - if (strcmp(name, AUDIO_HARDWARE_INTERFACE) != 0) return -EINVAL; - adev = calloc(1, sizeof(struct audio_device)); + struct audio_device *adev = calloc(1, sizeof(struct audio_device)); if (!adev) return -ENOMEM; diff --git a/tests/camera2/CameraBurstTests.cpp b/tests/camera2/CameraBurstTests.cpp index 7301fce..65ff460 100644 --- a/tests/camera2/CameraBurstTests.cpp +++ b/tests/camera2/CameraBurstTests.cpp @@ -15,6 +15,7 @@ */ #include <gtest/gtest.h> +#include <inttypes.h> #define LOG_TAG 
"CameraBurstTest" //#define LOG_NDEBUG 0 @@ -218,7 +219,7 @@ TEST_F(CameraBurstTest, ManualExposureControl) { CameraMetadata tmpRequest = previewRequest; ASSERT_EQ(OK, tmpRequest.update(ANDROID_SENSOR_EXPOSURE_TIME, &exposures[i], 1)); - ALOGV("Submitting capture request %d with exposure %lld", i, + ALOGV("Submitting capture request %d with exposure %"PRId64, i, exposures[i]); dout << "Capture request " << i << " exposure is " << (exposures[i]/1e6f) << std::endl; @@ -230,11 +231,11 @@ TEST_F(CameraBurstTest, ManualExposureControl) { float brightnesses[CAMERA_FRAME_BURST_COUNT]; // Get each frame (metadata) and then the buffer. Calculate brightness. for (int i = 0; i < CAMERA_FRAME_BURST_COUNT; ++i) { - ALOGV("Reading capture request %d with exposure %lld", i, exposures[i]); + ALOGV("Reading capture request %d with exposure %"PRId64, i, exposures[i]); ASSERT_EQ(OK, mDevice->waitForNextFrame(CAMERA_FRAME_TIMEOUT)); ALOGV("Reading capture request-1 %d", i); - CameraMetadata frameMetadata; - ASSERT_EQ(OK, mDevice->getNextFrame(&frameMetadata)); + CaptureResult result; + ASSERT_EQ(OK, mDevice->getNextResult(&result)); ALOGV("Reading capture request-2 %d", i); ASSERT_EQ(OK, mFrameListener->waitForFrame(CAMERA_FRAME_TIMEOUT)); @@ -613,7 +614,7 @@ TEST_F(CameraBurstTest, VariableBurst) { &durationList[i], 1)); ASSERT_EQ(OK, tmpRequest.update(ANDROID_SENSOR_SENSITIVITY, &sensitivityList[i], 1)); - ALOGV("Submitting capture %d with exposure %lld, frame duration %lld, sensitivity %d", + ALOGV("Submitting capture %zu with exposure %"PRId64", frame duration %"PRId64", sensitivity %d", i, expList[i], durationList[i], sensitivityList[i]); dout << "Capture request " << i << ": exposure is " << (expList[i]/1e6f) << " ms" << @@ -631,7 +632,7 @@ TEST_F(CameraBurstTest, VariableBurst) { // Get each frame (metadata) and then the buffer. Calculate brightness. 
for (size_t i = 0; i < expList.size(); ++i) { - ALOGV("Reading request %d", i); + ALOGV("Reading request %zu", i); dout << "Waiting for capture " << i << ": " << " exposure " << (expList[i]/1e6f) << " ms," << " frame duration " << (durationList[i]/1e6f) << " ms," << @@ -644,10 +645,10 @@ TEST_F(CameraBurstTest, VariableBurst) { if (durationList[i] * 2 > waitLimit) waitLimit = durationList[i] * 2; ASSERT_EQ(OK, mDevice->waitForNextFrame(waitLimit)); - ALOGV("Reading capture request-1 %d", i); - CameraMetadata frameMetadata; - ASSERT_EQ(OK, mDevice->getNextFrame(&frameMetadata)); - ALOGV("Reading capture request-2 %d", i); + ALOGV("Reading capture request-1 %zu", i); + CaptureResult result; + ASSERT_EQ(OK, mDevice->getNextResult(&result)); + ALOGV("Reading capture request-2 %zu", i); ASSERT_EQ(OK, mFrameListener->waitForFrame(CAMERA_FRAME_TIMEOUT)); ALOGV("We got the frame now"); @@ -668,7 +669,7 @@ TEST_F(CameraBurstTest, VariableBurst) { avgBrightness = 255; } - ALOGV("Total brightness for frame %d was %lld (underexposed %d, " + ALOGV("Total brightness for frame %zu was %lld (underexposed %d, " "overexposed %d), avg %f", i, brightness, underexposed, overexposed, avgBrightness); dout << "Average brightness (frame " << i << ") was " << avgBrightness @@ -711,7 +712,7 @@ TEST_F(CameraBurstTest, VariableBurst) { if (dumpFrames) { String8 dumpName = - String8::format("/data/local/tmp/camera2_test_variable_burst_frame_%03d.yuv", i); + String8::format("/data/local/tmp/camera2_test_variable_burst_frame_%03zu.yuv", i); dout << " Writing YUV dump to " << dumpName << std::endl; DumpYuvToFile(dumpName, imgBuffer); } diff --git a/tests/camera2/CameraFrameTests.cpp b/tests/camera2/CameraFrameTests.cpp index e78a862..3c5abf7 100644 --- a/tests/camera2/CameraFrameTests.cpp +++ b/tests/camera2/CameraFrameTests.cpp @@ -115,8 +115,8 @@ TEST_P(CameraFrameTest, GetFrame) { ALOGV("Reading capture request %d", i); ASSERT_EQ(OK, mDevice->waitForNextFrame(CAMERA_FRAME_TIMEOUT)); - 
CameraMetadata frameMetadata; - ASSERT_EQ(OK, mDevice->getNextFrame(&frameMetadata)); + CaptureResult result; + ASSERT_EQ(OK, mDevice->getNextResult(&result)); // wait for buffer to be available ASSERT_EQ(OK, mFrameListener->waitForFrame(CAMERA_FRAME_TIMEOUT)); diff --git a/tests/camera2/CameraMultiStreamTests.cpp b/tests/camera2/CameraMultiStreamTests.cpp index a78950c..536b656 100644 --- a/tests/camera2/CameraMultiStreamTests.cpp +++ b/tests/camera2/CameraMultiStreamTests.cpp @@ -14,6 +14,7 @@ * limitations under the License. */ +#include <inttypes.h> #define LOG_TAG "CameraMultiStreamTest" //#define LOG_NDEBUG 0 #include "CameraStreamFixture.h" @@ -181,11 +182,13 @@ public: mHeight(height) { mFormat = param.mFormat; if (useCpuConsumer) { - sp<BufferQueue> bq = new BufferQueue(); - mCpuConsumer = new CpuConsumer(bq, param.mHeapCount); + sp<IGraphicBufferProducer> producer; + sp<IGraphicBufferConsumer> consumer; + BufferQueue::createBufferQueue(&producer, &consumer); + mCpuConsumer = new CpuConsumer(consumer, param.mHeapCount); mCpuConsumer->setName(String8( "CameraMultiStreamTest::mCpuConsumer")); - mNativeWindow = new Surface(bq); + mNativeWindow = new Surface(producer); } else { // Render the stream to screen. mCpuConsumer = NULL; @@ -353,7 +356,7 @@ public: ASSERT_EQ(OK, request.update(ANDROID_SENSOR_EXPOSURE_TIME, &exposures[i], 1)); ASSERT_EQ(OK, request.update(ANDROID_SENSOR_SENSITIVITY, &sensitivities[i], 1)); ASSERT_EQ(OK, mDevice->capture(request)); - ALOGV("Submitting request with: id %d with exposure %lld, sensitivity %d", + ALOGV("Submitting request with: id %d with exposure %"PRId64", sensitivity %d", *requestIdStart, exposures[i], sensitivities[i]); if (CAMERA_MULTI_STREAM_DEBUGGING) { request.dump(STDOUT_FILENO); @@ -368,7 +371,7 @@ public: // Set wait limit based on expected frame duration. 
int64_t waitLimit = CAMERA_FRAME_TIMEOUT; for (size_t i = 0; i < requestCount; i++) { - ALOGV("Reading request result %d", i); + ALOGV("Reading request result %zu", i); /** * Raise the timeout to be at least twice as long as the exposure @@ -378,11 +381,13 @@ public: waitLimit = exposures[i] * EXP_WAIT_MULTIPLIER; } + CaptureResult result; CameraMetadata frameMetadata; int32_t resultRequestId; do { ASSERT_EQ(OK, mDevice->waitForNextFrame(waitLimit)); - ASSERT_EQ(OK, mDevice->getNextFrame(&frameMetadata)); + ASSERT_EQ(OK, mDevice->getNextResult(&result)); + frameMetadata = result.mMetadata; camera_metadata_entry_t resultEntry = frameMetadata.find(ANDROID_REQUEST_ID); ASSERT_EQ(1u, resultEntry.count); @@ -392,7 +397,7 @@ public: } } while (resultRequestId != targetRequestId); targetRequestId++; - ALOGV("Got capture burst result for request %d", i); + ALOGV("Got capture burst result for request %zu", i); // Validate capture result if (CAMERA_MULTI_STREAM_DEBUGGING) { @@ -411,7 +416,7 @@ public: captureBurstTimes.push_back(systemTime()); CpuConsumer::LockedBuffer imgBuffer; ASSERT_EQ(OK, consumer->lockNextBuffer(&imgBuffer)); - ALOGV("Got capture buffer for request %d", i); + ALOGV("Got capture buffer for request %zu", i); /** * TODO: Validate capture buffer. 
Current brightness calculation @@ -523,7 +528,7 @@ TEST_F(CameraMultiStreamTest, MultiBurst) { minFrameDuration = DEFAULT_FRAME_DURATION; } - ALOGV("targeted minimal frame duration is: %lldns", minFrameDuration); + ALOGV("targeted minimal frame duration is: %"PRId64"ns", minFrameDuration); data = &(availableJpegSizes.data.i32[0]); count = availableJpegSizes.count; @@ -643,7 +648,7 @@ TEST_F(CameraMultiStreamTest, MultiBurst) { ASSERT_EQ(OK, previewRequest.update( ANDROID_SENSOR_EXPOSURE_TIME, &exposures[i], 1)); - ALOGV("Submitting preview request %d with exposure %lld", + ALOGV("Submitting preview request %zu with exposure %"PRId64, i, exposures[i]); ASSERT_EQ(OK, mDevice->setStreamingRequest(previewRequest)); diff --git a/tests/camera2/CameraStreamFixture.h b/tests/camera2/CameraStreamFixture.h index a1f3aae..12b1971 100644 --- a/tests/camera2/CameraStreamFixture.h +++ b/tests/camera2/CameraStreamFixture.h @@ -161,11 +161,13 @@ protected: sp<CameraDeviceBase> device = mDevice; CameraStreamParams p = mParam; - sp<BufferQueue> bq = new BufferQueue(); - mCpuConsumer = new CpuConsumer(bq, p.mHeapCount); + sp<IGraphicBufferProducer> producer; + sp<IGraphicBufferConsumer> consumer; + BufferQueue::createBufferQueue(&producer, &consumer); + mCpuConsumer = new CpuConsumer(consumer, p.mHeapCount); mCpuConsumer->setName(String8("CameraStreamTest::mCpuConsumer")); - mNativeWindow = new Surface(bq); + mNativeWindow = new Surface(producer); int format = MapAutoFormat(p.mFormat); diff --git a/tests/camera2/camera2.cpp b/tests/camera2/camera2.cpp index 600d440..e3e7d9a 100644 --- a/tests/camera2/camera2.cpp +++ b/tests/camera2/camera2.cpp @@ -172,13 +172,6 @@ class Camera2Test: public testing::Test { err = listener.getNotificationsFrom(dev); if (err != OK) return err; - vendor_tag_query_ops_t *vendor_metadata_tag_ops; - err = dev->ops->get_metadata_vendor_tag_ops(dev, &vendor_metadata_tag_ops); - if (err != OK) return err; - - err = 
set_camera_metadata_vendor_tag_ops(vendor_metadata_tag_ops); - if (err != OK) return err; - return OK; } @@ -388,8 +381,10 @@ TEST_F(Camera2Test, Capture1Raw) { ASSERT_NO_FATAL_FAILURE(setUpCamera(id)); - sp<BufferQueue> bq = new BufferQueue(); - sp<CpuConsumer> rawConsumer = new CpuConsumer(bq, 1); + sp<IGraphicBufferProducer> bqProducer; + sp<IGraphicBufferConsumer> bqConsumer; + BufferQueue::createBufferQueue(&bqProducer, &bqConsumer); + sp<CpuConsumer> rawConsumer = new CpuConsumer(bqConsumer, 1); sp<FrameWaiter> rawWaiter = new FrameWaiter(); rawConsumer->setFrameAvailableListener(rawWaiter); @@ -420,7 +415,7 @@ TEST_F(Camera2Test, Capture1Raw) { int streamId; ASSERT_NO_FATAL_FAILURE( - setUpStream(bq, width, height, format, &streamId) ); + setUpStream(bqProducer, width, height, format, &streamId) ); camera_metadata_t *request; request = allocate_camera_metadata(20, 2000); @@ -522,8 +517,10 @@ TEST_F(Camera2Test, CaptureBurstRaw) { ASSERT_NO_FATAL_FAILURE(setUpCamera(id)); - sp<BufferQueue> bq = new BufferQueue(); - sp<CpuConsumer> rawConsumer = new CpuConsumer(bq, 1); + sp<IGraphicBufferProducer> bqProducer; + sp<IGraphicBufferConsumer> bqConsumer; + BufferQueue::createBufferQueue(&bqProducer, &bqConsumer); + sp<CpuConsumer> rawConsumer = new CpuConsumer(bqConsumer, 1); sp<FrameWaiter> rawWaiter = new FrameWaiter(); rawConsumer->setFrameAvailableListener(rawWaiter); @@ -554,7 +551,7 @@ TEST_F(Camera2Test, CaptureBurstRaw) { int streamId; ASSERT_NO_FATAL_FAILURE( - setUpStream(bq, width, height, format, &streamId) ); + setUpStream(bqProducer, width, height, format, &streamId) ); camera_metadata_t *request; request = allocate_camera_metadata(20, 2000); @@ -703,8 +700,10 @@ TEST_F(Camera2Test, Capture1Jpeg) { ASSERT_NO_FATAL_FAILURE(setUpCamera(id)); - sp<BufferQueue> bq = new BufferQueue(); - sp<CpuConsumer> jpegConsumer = new CpuConsumer(bq, 1); + sp<IGraphicBufferProducer> bqProducer; + sp<IGraphicBufferConsumer> bqConsumer; + 
BufferQueue::createBufferQueue(&bqProducer, &bqConsumer); + sp<CpuConsumer> jpegConsumer = new CpuConsumer(bqConsumer, 1); sp<FrameWaiter> jpegWaiter = new FrameWaiter(); jpegConsumer->setFrameAvailableListener(jpegWaiter); @@ -723,7 +722,7 @@ TEST_F(Camera2Test, Capture1Jpeg) { int streamId; ASSERT_NO_FATAL_FAILURE( - setUpStream(bq, width, height, format, &streamId) ); + setUpStream(bqProducer, width, height, format, &streamId) ); camera_metadata_t *request; request = allocate_camera_metadata(20, 2000); diff --git a/tests/fingerprint/Android.mk b/tests/fingerprint/Android.mk new file mode 100644 index 0000000..4f03c39 --- /dev/null +++ b/tests/fingerprint/Android.mk @@ -0,0 +1,19 @@ +LOCAL_PATH:= $(call my-dir) +include $(CLEAR_VARS) + +LOCAL_SRC_FILES:= \ + fingerprint_tests.cpp \ + +LOCAL_SHARED_LIBRARIES := \ + liblog \ + libhardware \ + +#LOCAL_C_INCLUDES += \ +# system/media/camera/include \ + +LOCAL_CFLAGS += -Wall -Wextra + +LOCAL_MODULE:= fingerprint_tests +LOCAL_MODULE_TAGS := tests + +include $(BUILD_NATIVE_TEST) diff --git a/tests/fingerprint/fingerprint_test_fixtures.h b/tests/fingerprint/fingerprint_test_fixtures.h new file mode 100644 index 0000000..a526203 --- /dev/null +++ b/tests/fingerprint/fingerprint_test_fixtures.h @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __ANDROID_HAL_FINGERPRINT_TEST_COMMON__ +#define __ANDROID_HAL_FINGERPRINT_TEST_COMMON__ + +#include <gtest/gtest.h> +#include <hardware/hardware.h> +#include <hardware/fingerprint.h> + +namespace tests { + +static const uint16_t kVersion = HARDWARE_MODULE_API_VERSION(1, 0); + +class FingerprintModule : public testing::Test { + public: + FingerprintModule() : + fp_module_(NULL) {} + ~FingerprintModule() {} + protected: + virtual void SetUp() { + const hw_module_t *hw_module = NULL; + ASSERT_EQ(0, hw_get_module(FINGERPRINT_HARDWARE_MODULE_ID, &hw_module)) + << "Can't get fingerprint module"; + ASSERT_TRUE(NULL != hw_module) + << "hw_get_module didn't return a valid fingerprint module"; + + fp_module_ = reinterpret_cast<const fingerprint_module_t*>(hw_module); + } + const fingerprint_module_t* fp_module() { return fp_module_; } + private: + const fingerprint_module_t *fp_module_; +}; + +class FingerprintDevice : public FingerprintModule { + public: + FingerprintDevice() : + fp_device_(NULL) {} + ~FingerprintDevice() {} + protected: + virtual void SetUp() { + FingerprintModule::SetUp(); + hw_device_t *device = NULL; + ASSERT_TRUE(NULL != fp_module()->common.methods->open) + << "Fingerprint open() is unimplemented"; + ASSERT_EQ(0, fp_module()->common.methods->open( + (const hw_module_t*)fp_module(), NULL, &device)) + << "Can't open fingerprint device"; + ASSERT_TRUE(NULL != device) + << "Fingerprint open() returned a NULL device"; + ASSERT_EQ(kVersion, device->version) + << "Unsupported version"; + fp_device_ = reinterpret_cast<fingerprint_device_t*>(device); + } + fingerprint_device_t* fp_device() { return fp_device_; } + private: + fingerprint_device_t *fp_device_; +}; + +} // namespace tests + +#endif // __ANDROID_HAL_FINGERPRINT_TEST_COMMON__ diff --git a/tests/fingerprint/fingerprint_tests.cpp b/tests/fingerprint/fingerprint_tests.cpp new file mode 100644 index 0000000..4463751 --- /dev/null +++ b/tests/fingerprint/fingerprint_tests.cpp @@ -0,0 
+1,37 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <gtest/gtest.h> +#include "fingerprint_test_fixtures.h" + +namespace tests { + +TEST_F(FingerprintDevice, isThereEnroll) { + ASSERT_TRUE(NULL != fp_device()->enroll) + << "enroll() function is not implemented"; +} + +TEST_F(FingerprintDevice, isThereRemove) { + ASSERT_TRUE(NULL != fp_device()->remove) + << "remove() function is not implemented"; +} + +TEST_F(FingerprintDevice, isThereSetNotify) { + ASSERT_TRUE(NULL != fp_device()->set_notify) + << "set_notify() function is not implemented"; +} + +} // namespace tests diff --git a/tests/hardware/Android.mk b/tests/hardware/Android.mk new file mode 100644 index 0000000..02540c9 --- /dev/null +++ b/tests/hardware/Android.mk @@ -0,0 +1,12 @@ +LOCAL_PATH:= $(call my-dir) +include $(CLEAR_VARS) + +LOCAL_MODULE := static-hal-check +LOCAL_SRC_FILES := struct-size.cpp struct-offset.cpp struct-last.cpp +LOCAL_SHARED_LIBRARIES := libhardware +LOCAL_CFLAGS := -std=gnu++11 -O0 + +LOCAL_C_INCLUDES += \ + system/media/camera/include + +include $(BUILD_STATIC_LIBRARY) diff --git a/tests/hardware/struct-last.cpp b/tests/hardware/struct-last.cpp new file mode 100644 index 0000000..44a7b2d --- /dev/null +++ b/tests/hardware/struct-last.cpp @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you 
may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <cstddef> +#include <system/window.h> +#include <hardware/hardware.h> +#include <hardware/sensors.h> +#include <hardware/fb.h> +#include <hardware/hwcomposer.h> +#include <hardware/gralloc.h> +#include <hardware/consumerir.h> +#include <hardware/camera_common.h> +#include <hardware/camera3.h> + +#define GET_PADDING(align, size) (((align) - ((size) % (align))) % (align)) + +#define CHECK_LAST_MEMBER(type, member) \ +do { \ +static constexpr size_t calc_size = offsetof(type, member) + sizeof(((type *)0)->member); \ +static_assert(sizeof(type) == calc_size + GET_PADDING(alignof(type), calc_size), \ +"" #member " is not the last element of " #type); \ +} while (0) + +void CheckSizes(void) { + //Types defined in hardware.h + CHECK_LAST_MEMBER(hw_module_t, reserved); + CHECK_LAST_MEMBER(hw_device_t, close); + + //Types defined in sensors.h + CHECK_LAST_MEMBER(sensors_vec_t, reserved); + CHECK_LAST_MEMBER(sensors_event_t, reserved1); + CHECK_LAST_MEMBER(struct sensor_t, reserved); + CHECK_LAST_MEMBER(sensors_poll_device_1_t, reserved_procs); + + //Types defined in fb.h + CHECK_LAST_MEMBER(framebuffer_device_t, reserved_proc); + + //Types defined in hwcomposer.h + CHECK_LAST_MEMBER(hwc_layer_1_t, reserved); + CHECK_LAST_MEMBER(hwc_composer_device_1_t, reserved_proc); + + //Types defined in gralloc.h + CHECK_LAST_MEMBER(gralloc_module_t, reserved_proc); + CHECK_LAST_MEMBER(alloc_device_t, reserved_proc); + + //Types defined in consumerir.h + CHECK_LAST_MEMBER(consumerir_device_t, 
reserved); + + //Types defined in camera_common.h + CHECK_LAST_MEMBER(vendor_tag_ops_t, reserved); + CHECK_LAST_MEMBER(camera_module_t, reserved); + + //Types defined in camera3.h + CHECK_LAST_MEMBER(camera3_device_ops_t, reserved); +} + diff --git a/tests/hardware/struct-offset.cpp b/tests/hardware/struct-offset.cpp new file mode 100644 index 0000000..6abe360 --- /dev/null +++ b/tests/hardware/struct-offset.cpp @@ -0,0 +1,224 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <cstddef> +#include <system/window.h> +#include <hardware/hardware.h> +#include <hardware/sensors.h> +#include <hardware/fb.h> +#include <hardware/hwcomposer.h> +#include <hardware/gralloc.h> +#include <hardware/consumerir.h> +#include <hardware/camera_common.h> +#include <hardware/camera3.h> + +//Ideally this would print type.member instead we need to rely on the line number from the output +template <size_t actual, size_t expected> void check_member(void) { + static_assert(actual == expected, ""); +} + +#ifdef __LP64__ +#define CHECK_MEMBER_AT(type, member, off32, off64) \ + check_member<offsetof(type, member), off64>() +#else +#define CHECK_MEMBER_AT(type, member, off32, off64) \ + check_member<offsetof(type, member), off32>() +#endif + +void CheckOffsets(void) { + //Types defined in hardware.h + CHECK_MEMBER_AT(hw_module_t, tag, 0, 0); + CHECK_MEMBER_AT(hw_module_t, module_api_version, 4, 4); + CHECK_MEMBER_AT(hw_module_t, hal_api_version, 6, 6); + CHECK_MEMBER_AT(hw_module_t, id, 8, 8); + CHECK_MEMBER_AT(hw_module_t, name, 12, 16); + CHECK_MEMBER_AT(hw_module_t, author, 16, 24); + CHECK_MEMBER_AT(hw_module_t, methods, 20, 32); + CHECK_MEMBER_AT(hw_module_t, dso, 24, 40); + CHECK_MEMBER_AT(hw_module_t, reserved, 28, 48); + + CHECK_MEMBER_AT(hw_device_t, tag, 0, 0); + CHECK_MEMBER_AT(hw_device_t, version, 4, 4); + CHECK_MEMBER_AT(hw_device_t, module, 8, 8); + CHECK_MEMBER_AT(hw_device_t, reserved, 12, 16); + CHECK_MEMBER_AT(hw_device_t, close, 60, 112); + + //Types defined in sensors.h + CHECK_MEMBER_AT(sensors_vec_t, v, 0, 0); + CHECK_MEMBER_AT(sensors_vec_t, x, 0, 0); + CHECK_MEMBER_AT(sensors_vec_t, y, 4, 4); + CHECK_MEMBER_AT(sensors_vec_t, z, 8, 8); + CHECK_MEMBER_AT(sensors_vec_t, azimuth, 0, 0); + CHECK_MEMBER_AT(sensors_vec_t, pitch, 4, 4); + CHECK_MEMBER_AT(sensors_vec_t, roll, 8, 8); + CHECK_MEMBER_AT(sensors_vec_t, status, 12, 12); + CHECK_MEMBER_AT(sensors_vec_t, reserved, 13, 13); + + CHECK_MEMBER_AT(sensors_event_t, version, 0, 0); 
+ CHECK_MEMBER_AT(sensors_event_t, sensor, 4, 4); + CHECK_MEMBER_AT(sensors_event_t, type, 8, 8); + CHECK_MEMBER_AT(sensors_event_t, reserved0, 12, 12); + CHECK_MEMBER_AT(sensors_event_t, timestamp, 16, 16); + CHECK_MEMBER_AT(sensors_event_t, data, 24, 24); + CHECK_MEMBER_AT(sensors_event_t, acceleration, 24, 24); + CHECK_MEMBER_AT(sensors_event_t, magnetic, 24, 24); + CHECK_MEMBER_AT(sensors_event_t, orientation, 24, 24); + CHECK_MEMBER_AT(sensors_event_t, gyro, 24, 24); + CHECK_MEMBER_AT(sensors_event_t, temperature, 24, 24); + CHECK_MEMBER_AT(sensors_event_t, distance, 24, 24); + CHECK_MEMBER_AT(sensors_event_t, light, 24, 24); + CHECK_MEMBER_AT(sensors_event_t, pressure, 24, 24); + CHECK_MEMBER_AT(sensors_event_t, relative_humidity, 24, 24); + CHECK_MEMBER_AT(sensors_event_t, uncalibrated_gyro, 24, 24); + CHECK_MEMBER_AT(sensors_event_t, uncalibrated_magnetic, 24, 24); + CHECK_MEMBER_AT(sensors_event_t, meta_data, 24, 24); + CHECK_MEMBER_AT(sensors_event_t, u64, 24, 24); + CHECK_MEMBER_AT(sensors_event_t, u64.data, 24, 24); + CHECK_MEMBER_AT(sensors_event_t, u64.step_counter, 24, 24); + CHECK_MEMBER_AT(sensors_event_t, flags, 88, 88); + CHECK_MEMBER_AT(sensors_event_t, reserved1, 92, 92); + + CHECK_MEMBER_AT(struct sensor_t, name, 0, 0); + CHECK_MEMBER_AT(struct sensor_t, vendor, 4, 8); + CHECK_MEMBER_AT(struct sensor_t, version, 8, 16); + CHECK_MEMBER_AT(struct sensor_t, handle, 12, 20); + CHECK_MEMBER_AT(struct sensor_t, type, 16, 24); + CHECK_MEMBER_AT(struct sensor_t, maxRange, 20, 28); + CHECK_MEMBER_AT(struct sensor_t, resolution, 24, 32); + CHECK_MEMBER_AT(struct sensor_t, power, 28, 36); + CHECK_MEMBER_AT(struct sensor_t, minDelay, 32, 40); + CHECK_MEMBER_AT(struct sensor_t, fifoReservedEventCount, 36, 44); + CHECK_MEMBER_AT(struct sensor_t, fifoMaxEventCount, 40, 48); + CHECK_MEMBER_AT(struct sensor_t, stringType, 44, 56); + CHECK_MEMBER_AT(struct sensor_t, requiredPermission, 48, 64); + CHECK_MEMBER_AT(struct sensor_t, maxDelay, 52, 72); + 
CHECK_MEMBER_AT(struct sensor_t, flags, 56, 80); + CHECK_MEMBER_AT(struct sensor_t, reserved, 60, 88); + + CHECK_MEMBER_AT(sensors_poll_device_1_t, v0, 0, 0); + CHECK_MEMBER_AT(sensors_poll_device_1_t, common, 0, 0); + CHECK_MEMBER_AT(sensors_poll_device_1_t, activate, 64, 120); + CHECK_MEMBER_AT(sensors_poll_device_1_t, setDelay, 68, 128); + CHECK_MEMBER_AT(sensors_poll_device_1_t, poll, 72, 136); + CHECK_MEMBER_AT(sensors_poll_device_1_t, batch, 76, 144); + CHECK_MEMBER_AT(sensors_poll_device_1_t, flush, 80, 152); + CHECK_MEMBER_AT(sensors_poll_device_1_t, reserved_procs, 84, 160); + + //Types defined in fb.h + CHECK_MEMBER_AT(framebuffer_device_t, common, 0, 0); + CHECK_MEMBER_AT(framebuffer_device_t, flags, 64, 120); + CHECK_MEMBER_AT(framebuffer_device_t, width, 68, 124); + CHECK_MEMBER_AT(framebuffer_device_t, height, 72, 128); + CHECK_MEMBER_AT(framebuffer_device_t, stride, 76, 132); + CHECK_MEMBER_AT(framebuffer_device_t, format, 80, 136); + CHECK_MEMBER_AT(framebuffer_device_t, xdpi, 84, 140); + CHECK_MEMBER_AT(framebuffer_device_t, ydpi, 88, 144); + CHECK_MEMBER_AT(framebuffer_device_t, fps, 92, 148); + CHECK_MEMBER_AT(framebuffer_device_t, minSwapInterval, 96, 152); + CHECK_MEMBER_AT(framebuffer_device_t, maxSwapInterval, 100, 156); + CHECK_MEMBER_AT(framebuffer_device_t, numFramebuffers, 104, 160); + CHECK_MEMBER_AT(framebuffer_device_t, reserved, 108, 164); + CHECK_MEMBER_AT(framebuffer_device_t, setSwapInterval, 136, 192); + CHECK_MEMBER_AT(framebuffer_device_t, setUpdateRect, 140, 200); + CHECK_MEMBER_AT(framebuffer_device_t, post, 144, 208); + CHECK_MEMBER_AT(framebuffer_device_t, compositionComplete, 148, 216); + CHECK_MEMBER_AT(framebuffer_device_t, dump, 152, 224); + CHECK_MEMBER_AT(framebuffer_device_t, enableScreen, 156, 232); + CHECK_MEMBER_AT(framebuffer_device_t, reserved_proc, 160, 240); + + //Types defined in hwcomposer.h + CHECK_MEMBER_AT(hwc_layer_1_t, compositionType, 0, 0); + CHECK_MEMBER_AT(hwc_layer_1_t, hints, 4, 4); + 
CHECK_MEMBER_AT(hwc_layer_1_t, flags, 8, 8); + CHECK_MEMBER_AT(hwc_layer_1_t, backgroundColor, 12, 16); + CHECK_MEMBER_AT(hwc_layer_1_t, handle, 12, 16); + CHECK_MEMBER_AT(hwc_layer_1_t, transform, 16, 24); + CHECK_MEMBER_AT(hwc_layer_1_t, blending, 20, 28); + CHECK_MEMBER_AT(hwc_layer_1_t, sourceCropi, 24, 32); + CHECK_MEMBER_AT(hwc_layer_1_t, sourceCrop, 24, 32); + CHECK_MEMBER_AT(hwc_layer_1_t, sourceCropf, 24, 32); + CHECK_MEMBER_AT(hwc_layer_1_t, displayFrame, 40, 48); + CHECK_MEMBER_AT(hwc_layer_1_t, visibleRegionScreen, 56, 64); + CHECK_MEMBER_AT(hwc_layer_1_t, acquireFenceFd, 64, 80); + CHECK_MEMBER_AT(hwc_layer_1_t, releaseFenceFd, 68, 84); + CHECK_MEMBER_AT(hwc_layer_1_t, planeAlpha, 72, 88); + CHECK_MEMBER_AT(hwc_layer_1_t, _pad, 73, 89); + + CHECK_MEMBER_AT(hwc_composer_device_1_t, common, 0, 0); + CHECK_MEMBER_AT(hwc_composer_device_1_t, prepare, 64, 120); + CHECK_MEMBER_AT(hwc_composer_device_1_t, set, 68, 128); + CHECK_MEMBER_AT(hwc_composer_device_1_t, eventControl, 72, 136); + CHECK_MEMBER_AT(hwc_composer_device_1_t, blank, 76, 144); + CHECK_MEMBER_AT(hwc_composer_device_1_t, query, 80, 152); + CHECK_MEMBER_AT(hwc_composer_device_1_t, registerProcs, 84, 160); + CHECK_MEMBER_AT(hwc_composer_device_1_t, dump, 88, 168); + CHECK_MEMBER_AT(hwc_composer_device_1_t, getDisplayConfigs, 92, 176); + CHECK_MEMBER_AT(hwc_composer_device_1_t, getDisplayAttributes, 96, 184); + CHECK_MEMBER_AT(hwc_composer_device_1_t, reserved_proc, 100, 192); + + //Types defined in gralloc.h + CHECK_MEMBER_AT(gralloc_module_t, common, 0, 0); + CHECK_MEMBER_AT(gralloc_module_t, registerBuffer, 128, 248); + CHECK_MEMBER_AT(gralloc_module_t, unregisterBuffer, 132, 256); + CHECK_MEMBER_AT(gralloc_module_t, lock, 136, 264); + CHECK_MEMBER_AT(gralloc_module_t, unlock, 140, 272); + CHECK_MEMBER_AT(gralloc_module_t, perform, 144, 280); + CHECK_MEMBER_AT(gralloc_module_t, lock_ycbcr, 148, 288); + CHECK_MEMBER_AT(gralloc_module_t, lockAsync, 152, 296); + CHECK_MEMBER_AT(gralloc_module_t, 
unlockAsync, 156, 304); + CHECK_MEMBER_AT(gralloc_module_t, lockAsync_ycbcr, 160, 312); + CHECK_MEMBER_AT(gralloc_module_t, reserved_proc, 164, 320); + + CHECK_MEMBER_AT(alloc_device_t, common, 0, 0); + CHECK_MEMBER_AT(alloc_device_t, alloc, 64, 120); + CHECK_MEMBER_AT(alloc_device_t, free, 68, 128); + CHECK_MEMBER_AT(alloc_device_t, dump, 72, 136); + CHECK_MEMBER_AT(alloc_device_t, reserved_proc, 76, 144); + + //Types defined in consumerir.h + CHECK_MEMBER_AT(consumerir_device_t, common, 0, 0); + CHECK_MEMBER_AT(consumerir_device_t, transmit, 64, 120); + CHECK_MEMBER_AT(consumerir_device_t, get_num_carrier_freqs, 68, 128); + CHECK_MEMBER_AT(consumerir_device_t, get_carrier_freqs, 72, 136); + CHECK_MEMBER_AT(consumerir_device_t, reserved, 76, 144); + + //Types defined in camera_common.h + CHECK_MEMBER_AT(vendor_tag_ops_t, get_tag_count, 0, 0); + CHECK_MEMBER_AT(vendor_tag_ops_t, get_all_tags, 4, 8); + CHECK_MEMBER_AT(vendor_tag_ops_t, get_section_name, 8, 16); + CHECK_MEMBER_AT(vendor_tag_ops_t, get_tag_name, 12, 24); + CHECK_MEMBER_AT(vendor_tag_ops_t, get_tag_type, 16, 32); + CHECK_MEMBER_AT(vendor_tag_ops_t, reserved, 20, 40); + + CHECK_MEMBER_AT(camera_module_t, common, 0, 0); + CHECK_MEMBER_AT(camera_module_t, get_number_of_cameras, 128, 248); + CHECK_MEMBER_AT(camera_module_t, get_camera_info, 132, 256); + CHECK_MEMBER_AT(camera_module_t, set_callbacks, 136, 264); + CHECK_MEMBER_AT(camera_module_t, get_vendor_tag_ops, 140, 272); + CHECK_MEMBER_AT(camera_module_t, reserved, 144, 280); + + //Types defined in camera3.h + CHECK_MEMBER_AT(camera3_device_ops_t, initialize, 0, 0); + CHECK_MEMBER_AT(camera3_device_ops_t, configure_streams, 4, 8); + CHECK_MEMBER_AT(camera3_device_ops_t, register_stream_buffers, 8, 16); + CHECK_MEMBER_AT(camera3_device_ops_t, construct_default_request_settings, 12, 24); + CHECK_MEMBER_AT(camera3_device_ops_t, process_capture_request, 16, 32); + CHECK_MEMBER_AT(camera3_device_ops_t, get_metadata_vendor_tag_ops, 20, 40); + 
CHECK_MEMBER_AT(camera3_device_ops_t, dump, 24, 48); + CHECK_MEMBER_AT(camera3_device_ops_t, flush, 28, 56); + CHECK_MEMBER_AT(camera3_device_ops_t, reserved, 32, 64); +} + diff --git a/tests/hardware/struct-size.cpp b/tests/hardware/struct-size.cpp new file mode 100644 index 0000000..4207ea8 --- /dev/null +++ b/tests/hardware/struct-size.cpp @@ -0,0 +1,76 @@ +/* + * Copyright (C) 2012 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +#include <system/window.h> +#include <hardware/hardware.h> +#include <hardware/sensors.h> +#include <hardware/fb.h> +#include <hardware/hwcomposer.h> +#include <hardware/gralloc.h> +#include <hardware/consumerir.h> +#include <hardware/camera_common.h> +#include <hardware/camera3.h> + +template<size_t> static constexpr size_t CheckSizeHelper(size_t, size_t); + +template<> constexpr size_t CheckSizeHelper<4>(size_t size32, size_t size64) { + return size32; +} + +template<> constexpr size_t CheckSizeHelper<8>(size_t size32, size_t size64) { + return size64; +} + +template<typename T, size_t size32, size_t size64> static void CheckTypeSize() { + const size_t mySize = CheckSizeHelper<sizeof(void *)>(size32, size64); + + static_assert(sizeof(T) == mySize, "struct is the wrong size"); +} + +void CheckSizes(void) { + //Types defined in hardware.h + CheckTypeSize<hw_module_t, 128, 248>(); + CheckTypeSize<hw_device_t, 64, 120>(); + + //Types defined in sensors.h + CheckTypeSize<sensors_vec_t, 16, 
16>(); + CheckTypeSize<sensors_event_t, 104, 104>(); + CheckTypeSize<struct sensor_t, 68, 104>(); + CheckTypeSize<sensors_poll_device_1_t, 116, 224>(); + + //Types defined in fb.h + CheckTypeSize<framebuffer_device_t, 184, 288>(); + + //Types defined in hwcomposer.h + CheckTypeSize<hwc_layer_1_t, 96, 120>(); + CheckTypeSize<hwc_composer_device_1_t, 116, 224>(); + + //Types defined in gralloc.h + CheckTypeSize<gralloc_module_t, 176, 344>(); + CheckTypeSize<alloc_device_t, 104, 200>(); + + //Types defined in consumerir.h + CheckTypeSize<consumerir_device_t, 96, 184>(); + + //Types defined in camera_common.h + CheckTypeSize<vendor_tag_ops_t, 52, 104>(); + CheckTypeSize<camera_module_t, 176, 344>(); + + //Types defined in camera3.h + CheckTypeSize<camera3_device_ops_t, 64, 128>(); +} + |