diff options
Diffstat (limited to 'include/hardware')
-rw-r--r-- | include/hardware/activity_recognition.h | 186 | ||||
-rw-r--r-- | include/hardware/audio.h | 3 | ||||
-rw-r--r-- | include/hardware/audio_alsaops.h | 102 | ||||
-rw-r--r-- | include/hardware/audio_effect.h | 2 | ||||
-rw-r--r-- | include/hardware/audio_policy.h | 7 | ||||
-rw-r--r-- | include/hardware/bt_gatt_client.h | 18 | ||||
-rw-r--r-- | include/hardware/bt_gatt_server.h | 4 | ||||
-rw-r--r-- | include/hardware/bt_gatt_types.h | 8 | ||||
-rw-r--r-- | include/hardware/bt_hf.h | 46 | ||||
-rw-r--r-- | include/hardware/camera3.h | 738 | ||||
-rw-r--r-- | include/hardware/camera_common.h | 66 | ||||
-rw-r--r-- | include/hardware/fingerprint.h | 127 | ||||
-rw-r--r-- | include/hardware/gps.h | 75 | ||||
-rw-r--r-- | include/hardware/hardware.h | 8 | ||||
-rw-r--r-- | include/hardware/hwcomposer.h | 50 | ||||
-rw-r--r-- | include/hardware/hwcomposer_defs.h | 5 | ||||
-rw-r--r-- | include/hardware/nfc_tag.h | 83 | ||||
-rw-r--r-- | include/hardware/power.h | 10 | ||||
-rw-r--r-- | include/hardware/sensors.h | 892 | ||||
-rw-r--r-- | include/hardware/tv_input.h | 333 |
20 files changed, 1906 insertions, 857 deletions
diff --git a/include/hardware/activity_recognition.h b/include/hardware/activity_recognition.h new file mode 100644 index 0000000..3d3c1bd --- /dev/null +++ b/include/hardware/activity_recognition.h @@ -0,0 +1,186 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Activity Recognition HAL. The goal is to provide low power, low latency, always-on activity + * recognition implemented in hardware (i.e. these activity recognition algorithms/classifers + * should NOT be run on the AP). By low power we mean that this may be activated 24/7 without + * impacting the battery drain speed (goal in order of 1mW including the power for sensors). + * This HAL does not specify the input sources that are used towards detecting these activities. + * It has one monitor interface which can be used to batch activities for always-on + * activity_recognition and if the latency is zero, the same interface can be used for low latency + * detection. 
+ */ + +#ifndef ANDROID_ACTIVITY_RECOGNITION_INTERFACE_H +#define ANDROID_ACTIVITY_RECOGNITION_INTERFACE_H + +#include <hardware/hardware.h> + +__BEGIN_DECLS + +#define ACTIVITY_RECOGNITION_HEADER_VERSION 1 +#define ACTIVITY_RECOGNITION_API_VERSION_0_1 HARDWARE_DEVICE_API_VERSION_2(0, 1, ACTIVITY_RECOGNITION_HEADER_VERSION) + +#define ACTIVITY_RECOGNITION_HARDWARE_MODULE_ID "activity_recognition" +#define ACTIVITY_RECOGNITION_HARDWARE_INTERFACE "activity_recognition_hw_if" + +/* + * Define constants for various activity types. Multiple activities may be active at the same time + * and sometimes none of these activities may be active. + */ + +/* Reserved. get_supported_activities_list() should not return this activity. */ +#define DETECTED_ACTIVITY_RESERVED (0) + +#define DETECTED_ACTIVITY_IN_VEHICLE (1) + +#define DETECTED_ACTIVITY_ON_BICYCLE (2) + +#define DETECTED_ACTIVITY_WALKING (3) + +#define DETECTED_ACTIVITY_RUNNING (4) + +#define DETECTED_ACTIVITY_STILL (5) + +#define DETECTED_ACTIVITY_TILTING (6) + +/* Values for activity_event.event_types. */ +enum { + /* + * A flush_complete event which indicates that a flush() has been successfully completed. This + * does not correspond to any activity/event. An event of this type should be added to the end + * of a batch FIFO and it indicates that all the events in the batch FIFO have been successfully + * reported to the framework. An event of this type should be generated only if flush() has been + * explicitly called and if the FIFO is empty at the time flush() is called it should trivially + * return a flush_complete_event to indicate that the FIFO is empty. + * + * A flush complete event should have the following parameters set. + * activity_event_t.event_type = ACTIVITY_EVENT_TYPE_FLUSH_COMPLETE + * activity_event_t.detected_activity = DETECTED_ACTIVITY_RESERVED + * activity_event_t.timestamp = 0 + * activity_event_t.reserved = 0 + * See (*flush)() for more details. 
+ */ + ACTIVITY_EVENT_TYPE_FLUSH_COMPLETE = 0, + + /* Signifies entering an activity. */ + ACTIVITY_EVENT_TYPE_ENTER = 1, + + /* Signifies exiting an activity. */ + ACTIVITY_EVENT_TYPE_EXIT = 2 +}; + +/* + * Each event is a separate activity with event_type indicating whether this activity has started + * or ended. Eg event: (event_type="enter", detected_activity="ON_FOOT", timestamp) + */ +typedef struct activity_event { + /* One of the ACTIVITY_EVENT_TYPE_* constants defined above. */ + uint32_t event_type; + + /* Detected Activity. One of DETECTED_ACTIVITY_TYPE_* constants defined above. */ + int32_t detected_activity; + + /* Time at which the transition/event has occurred in nanoseconds using elapsedRealTimeNano. */ + int64_t timestamp; + + /* Set to zero. */ + int32_t reserved[4]; +} activity_event_t; + +typedef struct activity_recognition_module { + hw_module_t common; + + /* + * List of all activities supported by this module. Each activity is represented as an integer. + * Each value in the list is one of the DETECTED_ACTIVITY_* constants defined above. Return + * value is the size of this list. + */ + int (*get_supported_activities_list)(struct activity_recognition_module* module, + int** activity_list); +} activity_recognition_module_t; + +struct activity_recognition_device; + +typedef struct activity_recognition_callback_procs { + // Callback for activity_data. This is guaranteed to not invoke any HAL methods. + // Memory allocated for the events can be reused after this method returns. + // events - Array of activity_event_t s that are reported. + // count - size of the array. + void (*activity_callback)(const struct activity_recognition_device* dev, + const activity_event_t* events, int count); +} activity_recognition_callback_procs_t; + +typedef struct activity_recognition_device { + hw_device_t common; + + /* + * Sets the callback to invoke when there are events to report. This call overwrites the + * previously registered callback (if any). 
+ */ + void (*register_activity_callback)(const struct activity_recognition_device* dev, + const activity_recognition_callback_procs_t* callback); + + /* + * Activates and deactivates monitoring of activity transitions. Activities need not be reported + * as soon as they are detected. The detected activities are stored in a FIFO and reported in + * batches when the "max_batch_report_latency" expires or when the batch FIFO is full. The + * implementation should allow the AP to go into suspend mode while the activities are detected + * and stored in the batch FIFO. Whenever events need to be reported (like when the FIFO is full + * or when the max_batch_report_latency has expired for an activity, event pair), it should + * wake_up the AP so that no events are lost. Activities are stored as transitions and they are + * allowed to overlap with each other. + * detected_activity - The specific activity that needs to be monitored. + * event_type - Specific transition of the activity that needs to be monitored. + * enabled - Enable/Disable detection of an (detected_activity, event_type) pair. Each + * pair can be activated or deactivated independently of the other. The HAL + * implementation needs to keep track of which pairs are currently active + * and needs to detect only those activities. + * max_batch_report_latency - a transition can be delayed by at most + * “max_batch_report_latency” nanoseconds. + * Return 0 on success, negative errno code otherwise. + */ + int (*monitor_activity_event)(const struct activity_recognition_device* dev, + int32_t detected_activity, int32_t event_type, int64_t max_batch_report_latency_ns, + int32_t enabled); + + /* + * Flush all the batch FIFOs. Report all the activities that were stored in the FIFO so far as + * if max_batch_report_latency had expired. This shouldn't change the latency in any way. Add + * a flush_complete_event to indicate the end of the FIFO after all events are delivered. 
+ * See ACTIVITY_EVENT_TYPE_FLUSH_COMPLETE for more details. + * Return 0 on success, negative errno code otherwise. + */ + int (*flush)(const struct activity_recognition_device* dev); + + // Must be set to NULL. + void (*reserved_procs[4])(void); +} activity_recognition_device_t; + +static inline int activity_recognition_open(const hw_module_t* module, + activity_recognition_device_t** device) { + return module->methods->open(module, + ACTIVITY_RECOGNITION_HARDWARE_INTERFACE, (hw_device_t**)device); +} + +static inline int activity_recognition_close(activity_recognition_device_t* device) { + return device->common.close(&device->common); +} + +__END_DECLS + +#endif // ANDROID_ACTIVITY_RECOGNITION_INTERFACE_H diff --git a/include/hardware/audio.h b/include/hardware/audio.h index 6ba2544..b53cbff 100644 --- a/include/hardware/audio.h +++ b/include/hardware/audio.h @@ -97,6 +97,9 @@ __BEGIN_DECLS /* Screen state */ #define AUDIO_PARAMETER_KEY_SCREEN_STATE "screen_state" +/* Bluetooth SCO wideband */ +#define AUDIO_PARAMETER_KEY_BT_SCO_WB "bt_wbs" + /** * audio stream parameters */ diff --git a/include/hardware/audio_alsaops.h b/include/hardware/audio_alsaops.h new file mode 100644 index 0000000..0d266ff --- /dev/null +++ b/include/hardware/audio_alsaops.h @@ -0,0 +1,102 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* This file contains shared utility functions to handle the tinyalsa + * implementation for Android internal audio, generally in the hardware layer. + * Some routines may log a fatal error on failure, as noted. + */ + +#ifndef ANDROID_AUDIO_ALSAOPS_H +#define ANDROID_AUDIO_ALSAOPS_H + +#include <cutils/log.h> +#include <system/audio.h> +#include <tinyalsa/asoundlib.h> + +__BEGIN_DECLS + +/* Converts audio_format to pcm_format. + * Parameters: + * format the audio_format_t to convert + * + * Logs a fatal error if format is not a valid convertible audio_format_t. + */ +static inline enum pcm_format pcm_format_from_audio_format(audio_format_t format) +{ + switch (format) { +#ifdef HAVE_BIG_ENDIAN + case AUDIO_FORMAT_PCM_16_BIT: + return PCM_FORMAT_S16_BE; + case AUDIO_FORMAT_PCM_24_BIT_PACKED: + return PCM_FORMAT_S24_3BE; + case AUDIO_FORMAT_PCM_32_BIT: + return PCM_FORMAT_S32_BE; + case AUDIO_FORMAT_PCM_8_24_BIT: + return PCM_FORMAT_S24_BE; +#else + case AUDIO_FORMAT_PCM_16_BIT: + return PCM_FORMAT_S16_LE; + case AUDIO_FORMAT_PCM_24_BIT_PACKED: + return PCM_FORMAT_S24_3LE; + case AUDIO_FORMAT_PCM_32_BIT: + return PCM_FORMAT_S32_LE; + case AUDIO_FORMAT_PCM_8_24_BIT: + return PCM_FORMAT_S24_LE; +#endif + case AUDIO_FORMAT_PCM_FLOAT: /* there is no equivalent for float */ + default: + LOG_ALWAYS_FATAL("pcm_format_from_audio_format: invalid audio format %#x", format); + return 0; + } +} + +/* Converts pcm_format to audio_format. + * Parameters: + * format the pcm_format to convert + * + * Logs a fatal error if format is not a valid convertible pcm_format. 
+ */ +static inline audio_format_t audio_format_from_pcm_format(enum pcm_format format) +{ + switch (format) { +#ifdef HAVE_BIG_ENDIAN + case PCM_FORMAT_S16_BE: + return AUDIO_FORMAT_PCM_16_BIT; + case PCM_FORMAT_S24_3BE: + return AUDIO_FORMAT_PCM_24_BIT_PACKED; + case PCM_FORMAT_S24_BE: + return AUDIO_FORMAT_PCM_8_24_BIT; + case PCM_FORMAT_S32_BE: + return AUDIO_FORMAT_PCM_32_BIT; +#else + case PCM_FORMAT_S16_LE: + return AUDIO_FORMAT_PCM_16_BIT; + case PCM_FORMAT_S24_3LE: + return AUDIO_FORMAT_PCM_24_BIT_PACKED; + case PCM_FORMAT_S24_LE: + return AUDIO_FORMAT_PCM_8_24_BIT; + case PCM_FORMAT_S32_LE: + return AUDIO_FORMAT_PCM_32_BIT; +#endif + default: + LOG_ALWAYS_FATAL("audio_format_from_pcm_format: invalid pcm format %#x", format); + return 0; + } +} + +__END_DECLS + +#endif /* ANDROID_AUDIO_ALSAOPS_H */ diff --git a/include/hardware/audio_effect.h b/include/hardware/audio_effect.h index b49d02d..ee48e4c 100644 --- a/include/hardware/audio_effect.h +++ b/include/hardware/audio_effect.h @@ -815,7 +815,7 @@ typedef struct buffer_config_s { uint32_t samplingRate; // sampling rate uint32_t channels; // channel mask (see audio_channel_mask_t in audio.h) buffer_provider_t bufferProvider; // buffer provider - uint8_t format; // Audio format (see see audio_format_t in audio.h) + uint8_t format; // Audio format (see audio_format_t in audio.h) uint8_t accessMode; // read/write or accumulate in buffer (effect_buffer_access_e) uint16_t mask; // indicates which of the above fields is valid } buffer_config_t; diff --git a/include/hardware/audio_policy.h b/include/hardware/audio_policy.h index 4e75e02..cbaa31d 100644 --- a/include/hardware/audio_policy.h +++ b/include/hardware/audio_policy.h @@ -332,10 +332,9 @@ struct audio_policy_service_ops { audio_io_handle_t output, int delay_ms); - /* reroute a given stream type to the specified output */ - int (*set_stream_output)(void *service, - audio_stream_type_t stream, - audio_io_handle_t output); + /* invalidate a stream type, 
causing a reroute to an unspecified new output */ + int (*invalidate_stream)(void *service, + audio_stream_type_t stream); /* function enabling to send proprietary informations directly from audio * policy manager to audio hardware interface. */ diff --git a/include/hardware/bt_gatt_client.h b/include/hardware/bt_gatt_client.h index 11b146d..baed4bd 100644 --- a/include/hardware/bt_gatt_client.h +++ b/include/hardware/bt_gatt_client.h @@ -159,6 +159,9 @@ typedef void (*listen_callback)(int status, int server_if); /** Callback invoked when the MTU for a given connection changes */ typedef void (*configure_mtu_callback)(int conn_id, int status, int mtu); +/** Callback invoked when a scan filter configuration command has completed */ +typedef void (*scan_filter_callback)(int action, int status); + typedef struct { register_client_callback register_client_cb; scan_result_callback scan_result_cb; @@ -179,6 +182,7 @@ typedef struct { read_remote_rssi_callback read_remote_rssi_cb; listen_callback listen_cb; configure_mtu_callback configure_mtu_cb; + scan_filter_callback scan_filter_cb; } btgatt_client_callbacks_t; /** Represents the standard BT-GATT client interface. 
*/ @@ -195,7 +199,7 @@ typedef struct { /** Create a connection to a remote LE or dual-mode device */ bt_status_t (*connect)( int client_if, const bt_bdaddr_t *bd_addr, - bool is_direct ); + bool is_direct, int transport ); /** Disconnect a remote device or cancel a pending connection */ bt_status_t (*disconnect)( int client_if, const bt_bdaddr_t *bd_addr, @@ -276,6 +280,17 @@ typedef struct { /** Request RSSI for a given remote device */ bt_status_t (*read_remote_rssi)( int client_if, const bt_bdaddr_t *bd_addr); + /** Enable or disable scan filtering */ + bt_status_t (*scan_filter_enable)( int enable ); + + /** Configure a scan filter condition */ + bt_status_t (*scan_filter_add)(int type, int company_id, int company_mask, + int len, const bt_uuid_t *p_uuid, const bt_uuid_t *p_uuid_mask, + const bt_bdaddr_t *bd_addr, char addr_type, const char* p_value); + + /** Clear all scan filter conditions */ + bt_status_t (*scan_filter_clear)(); + /** Determine the type of the remote device (LE, BR/EDR, Dual-mode) */ int (*get_device_type)( const bt_bdaddr_t *bd_addr ); @@ -291,6 +306,7 @@ typedef struct { /** Test mode interface */ bt_status_t (*test_command)( int command, btgatt_test_params_t* params); + } btgatt_client_interface_t; __END_DECLS diff --git a/include/hardware/bt_gatt_server.h b/include/hardware/bt_gatt_server.h index 1a5a400..32f8ef6 100644 --- a/include/hardware/bt_gatt_server.h +++ b/include/hardware/bt_gatt_server.h @@ -129,7 +129,8 @@ typedef struct { bt_status_t (*unregister_server)(int server_if ); /** Create a connection to a remote peripheral */ - bt_status_t (*connect)(int server_if, const bt_bdaddr_t *bd_addr, bool is_direct ); + bt_status_t (*connect)(int server_if, const bt_bdaddr_t *bd_addr, + bool is_direct, int transport); /** Disconnect an established connection or cancel a pending one */ bt_status_t (*disconnect)(int server_if, const bt_bdaddr_t *bd_addr, @@ -168,6 +169,7 @@ typedef struct { /** Send a response to a read/write operation */ 
bt_status_t (*send_response)(int conn_id, int trans_id, int status, btgatt_response_t *response); + } btgatt_server_interface_t; __END_DECLS diff --git a/include/hardware/bt_gatt_types.h b/include/hardware/bt_gatt_types.h index 0ac217e..e037ddc 100644 --- a/include/hardware/bt_gatt_types.h +++ b/include/hardware/bt_gatt_types.h @@ -43,6 +43,14 @@ typedef struct uint8_t is_primary; } btgatt_srvc_id_t; +/** Preferred physical Transport for GATT connection */ +typedef enum +{ + GATT_TRANSPORT_AUTO, + GATT_TRANSPORT_BREDR, + GATT_TRANSPORT_LE +} btgatt_transport_t; + __END_DECLS #endif /* ANDROID_INCLUDE_BT_GATT_TYPES_H */ diff --git a/include/hardware/bt_hf.h b/include/hardware/bt_hf.h index 6135ac4..e015c28 100644 --- a/include/hardware/bt_hf.h +++ b/include/hardware/bt_hf.h @@ -79,65 +79,65 @@ typedef void (* bthf_audio_state_callback)(bthf_audio_state_t state, bt_bdaddr_t /** Callback for VR connection state change. * state will have one of the values from BtHfVRState */ -typedef void (* bthf_vr_cmd_callback)(bthf_vr_state_t state); +typedef void (* bthf_vr_cmd_callback)(bthf_vr_state_t state, bt_bdaddr_t *bd_addr); /** Callback for answer incoming call (ATA) */ -typedef void (* bthf_answer_call_cmd_callback)(); +typedef void (* bthf_answer_call_cmd_callback)(bt_bdaddr_t *bd_addr); /** Callback for disconnect call (AT+CHUP) */ -typedef void (* bthf_hangup_call_cmd_callback)(); +typedef void (* bthf_hangup_call_cmd_callback)(bt_bdaddr_t *bd_addr); /** Callback for disconnect call (AT+CHUP) * type will denote Speaker/Mic gain (BtHfVolumeControl). 
*/ -typedef void (* bthf_volume_cmd_callback)(bthf_volume_type_t type, int volume); +typedef void (* bthf_volume_cmd_callback)(bthf_volume_type_t type, int volume, bt_bdaddr_t *bd_addr); /** Callback for dialing an outgoing call * If number is NULL, redial */ -typedef void (* bthf_dial_call_cmd_callback)(char *number); +typedef void (* bthf_dial_call_cmd_callback)(char *number, bt_bdaddr_t *bd_addr); /** Callback for sending DTMF tones * tone contains the dtmf character to be sent */ -typedef void (* bthf_dtmf_cmd_callback)(char tone); +typedef void (* bthf_dtmf_cmd_callback)(char tone, bt_bdaddr_t *bd_addr); /** Callback for enabling/disabling noise reduction/echo cancellation * value will be 1 to enable, 0 to disable */ -typedef void (* bthf_nrec_cmd_callback)(bthf_nrec_t nrec); +typedef void (* bthf_nrec_cmd_callback)(bthf_nrec_t nrec, bt_bdaddr_t *bd_addr); /** Callback for call hold handling (AT+CHLD) * value will contain the call hold command (0, 1, 2, 3) */ -typedef void (* bthf_chld_cmd_callback)(bthf_chld_type_t chld); +typedef void (* bthf_chld_cmd_callback)(bthf_chld_type_t chld, bt_bdaddr_t *bd_addr); /** Callback for CNUM (subscriber number) */ -typedef void (* bthf_cnum_cmd_callback)(); +typedef void (* bthf_cnum_cmd_callback)(bt_bdaddr_t *bd_addr); /** Callback for indicators (CIND) */ -typedef void (* bthf_cind_cmd_callback)(); +typedef void (* bthf_cind_cmd_callback)(bt_bdaddr_t *bd_addr); /** Callback for operator selection (COPS) */ -typedef void (* bthf_cops_cmd_callback)(); +typedef void (* bthf_cops_cmd_callback)(bt_bdaddr_t *bd_addr); /** Callback for call list (AT+CLCC) */ -typedef void (* bthf_clcc_cmd_callback) (); +typedef void (* bthf_clcc_cmd_callback) (bt_bdaddr_t *bd_addr); /** Callback for unknown AT command recd from HF * at_string will contain the unparsed AT string */ -typedef void (* bthf_unknown_at_cmd_callback)(char *at_string); +typedef void (* bthf_unknown_at_cmd_callback)(char *at_string, bt_bdaddr_t *bd_addr); /** Callback 
for keypressed (HSP) event. */ -typedef void (* bthf_key_pressed_cmd_callback)(); +typedef void (* bthf_key_pressed_cmd_callback)(bt_bdaddr_t *bd_addr); /** BT-HF callback structure. */ typedef struct { @@ -213,7 +213,7 @@ typedef struct { /** * Register the BtHf callbacks */ - bt_status_t (*init)( bthf_callbacks_t* callbacks ); + bt_status_t (*init)( bthf_callbacks_t* callbacks, int max_hf_clients); /** connect to headset */ bt_status_t (*connect)( bt_bdaddr_t *bd_addr ); @@ -228,33 +228,33 @@ typedef struct { bt_status_t (*disconnect_audio)( bt_bdaddr_t *bd_addr ); /** start voice recognition */ - bt_status_t (*start_voice_recognition)(); + bt_status_t (*start_voice_recognition)( bt_bdaddr_t *bd_addr ); /** stop voice recognition */ - bt_status_t (*stop_voice_recognition)(); + bt_status_t (*stop_voice_recognition)( bt_bdaddr_t *bd_addr ); /** volume control */ - bt_status_t (*volume_control) (bthf_volume_type_t type, int volume); + bt_status_t (*volume_control) (bthf_volume_type_t type, int volume, bt_bdaddr_t *bd_addr ); /** Combined device status change notification */ bt_status_t (*device_status_notification)(bthf_network_state_t ntk_state, bthf_service_type_t svc_type, int signal, int batt_chg); /** Response for COPS command */ - bt_status_t (*cops_response)(const char *cops); + bt_status_t (*cops_response)(const char *cops, bt_bdaddr_t *bd_addr ); /** Response for CIND command */ bt_status_t (*cind_response)(int svc, int num_active, int num_held, bthf_call_state_t call_setup_state, - int signal, int roam, int batt_chg); + int signal, int roam, int batt_chg, bt_bdaddr_t *bd_addr ); /** Pre-formatted AT response, typically in response to unknown AT cmd */ - bt_status_t (*formatted_at_response)(const char *rsp); + bt_status_t (*formatted_at_response)(const char *rsp, bt_bdaddr_t *bd_addr ); /** ok/error response * ERROR (0) * OK (1) */ - bt_status_t (*at_response) (bthf_at_response_t response_code, int error_code); + bt_status_t (*at_response) 
(bthf_at_response_t response_code, int error_code, bt_bdaddr_t *bd_addr ); /** response for CLCC command * Can be iteratively called for each call index @@ -263,7 +263,7 @@ typedef struct { bt_status_t (*clcc_response) (int index, bthf_call_direction_t dir, bthf_call_state_t state, bthf_call_mode_t mode, bthf_call_mpty_type_t mpty, const char *number, - bthf_call_addrtype_t type); + bthf_call_addrtype_t type, bt_bdaddr_t *bd_addr ); /** notify of a call state change * Each update notifies diff --git a/include/hardware/camera3.h b/include/hardware/camera3.h index afc9d9f..6623dc7 100644 --- a/include/hardware/camera3.h +++ b/include/hardware/camera3.h @@ -21,19 +21,25 @@ #include "camera_common.h" /** - * Camera device HAL 3.1 [ CAMERA_DEVICE_API_VERSION_3_1 ] + * Camera device HAL 3.2 [ CAMERA_DEVICE_API_VERSION_3_2 ] * * EXPERIMENTAL. * * Supports the android.hardware.Camera API. * * Camera devices that support this version of the HAL must return - * CAMERA_DEVICE_API_VERSION_3_1 in camera_device_t.common.version and in + * CAMERA_DEVICE_API_VERSION_3_2 in camera_device_t.common.version and in * camera_info_t.device_version (from camera_module_t.get_camera_info). * - * Camera modules that may contain version 3.1 devices must implement at least - * version 2.0 of the camera module interface (as defined by - * camera_module_t.common.module_api_version). + * CAMERA_DEVICE_API_VERSION_3_2: + * Camera modules that may contain version 3.2 devices must implement at + * least version 2.2 of the camera module interface (as defined by + * camera_module_t.common.module_api_version). + * + * <= CAMERA_DEVICE_API_VERSION_3_1: + * Camera modules that may contain version 3.1 (or 3.0) devices must + * implement at least version 2.0 of the camera module interface + * (as defined by camera_module_t.common.module_api_version). * * See camera_common.h for more versioning details. * @@ -44,6 +50,9 @@ * S4. 3A modes and state machines * S5. Cropping * S6. Error management + * S7. 
Key Performance Indicator (KPI) glossary + * S8. Sample Use Cases + * S9. Notes on Controls and Metadata */ /** @@ -88,6 +97,27 @@ * - configure_streams passes consumer usage flags to the HAL. * * - flush call to drop all in-flight requests/buffers as fast as possible. + * + * 3.2: Minor revision of expanded-capability HAL: + * + * - Deprecates get_metadata_vendor_tag_ops. Please use get_vendor_tag_ops + * in camera_common.h instead. + * + * - register_stream_buffers deprecated. All gralloc buffers provided + * by framework to HAL in process_capture_request may be new at any time. + * + * - add partial result support. process_capture_result may be called + * multiple times with a subset of the available result before the full + * result is available. + * + * - add manual template to camera3_request_template. The applications may + * use this template to control the capture settings directly. + * + * - Rework the bidirectional and input stream specifications. + * + * - change the input buffer return path. The buffer is returned in + * process_capture_result instead of process_capture_request. + * */ /** @@ -108,12 +138,19 @@ * 4. The framework calls camera3_device_t->ops->configure_streams() with a list * of input/output streams to the HAL device. * - * 5. The framework allocates gralloc buffers and calls + * 5. <= CAMERA_DEVICE_API_VERSION_3_1: + * + * The framework allocates gralloc buffers and calls * camera3_device_t->ops->register_stream_buffers() for at least one of the * output streams listed in configure_streams. The same stream is registered * only once. * - * 5. The framework requests default settings for some number of use cases with + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * camera3_device_t->ops->register_stream_buffers() is not called and must + * be NULL. + * + * 6. The framework requests default settings for some number of use cases with * calls to camera3_device_t->ops->construct_default_request_settings(). This * may occur any time after step 3. 
* @@ -124,23 +161,64 @@ * camera3_device_t->ops->process_capture_request(). The HAL must block the * return of this call until it is ready for the next request to be sent. * - * 8. The framework continues to submit requests, and possibly call - * register_stream_buffers() for not-yet-registered streams, and call + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * The buffer_handle_t provided in the camera3_stream_buffer_t array + * in the camera3_capture_request_t may be new and never-before-seen + * by the HAL on any given new request. + * + * 8. The framework continues to submit requests, and call * construct_default_request_settings to get default settings buffers for * other use cases. * + * <= CAMERA_DEVICE_API_VERSION_3_1: + * + * The framework may call register_stream_buffers() at this time for + * not-yet-registered streams. + * * 9. When the capture of a request begins (sensor starts exposing for the * capture), the HAL calls camera3_callback_ops_t->notify() with the SHUTTER * event, including the frame number and the timestamp for start of exposure. + * + * <= CAMERA_DEVICE_API_VERSION_3_1: + * * This notify call must be made before the first call to * process_capture_result() for that frame number. * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * The camera3_callback_ops_t->notify() call with the SHUTTER event should + * be made as early as possible since the framework will be unable to + * deliver gralloc buffers to the application layer (for that frame) until + * it has a valid timestamp for the start of exposure. + * + * Both partial metadata results and the gralloc buffers may be sent to the + * framework at any time before or after the SHUTTER event. + * * 10. After some pipeline delay, the HAL begins to return completed captures to * the framework with camera3_callback_ops_t->process_capture_result(). These * are returned in the same order as the requests were submitted. 
Multiple * requests can be in flight at once, depending on the pipeline depth of the * camera HAL device. * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * Once a buffer is returned by process_capture_result as part of the + * camera3_stream_buffer_t array, and the fence specified by release_fence + * has been signaled (this is a no-op for -1 fences), the ownership of that + * buffer is considered to be transferred back to the framework. After that, + * the HAL must no longer retain that particular buffer, and the + * framework may clean up the memory for it immediately. + * + * process_capture_result may be called multiple times for a single frame, + * each time with a new disjoint piece of metadata and/or set of gralloc + * buffers. The framework will accumulate these partial metadata results + * into one result. + * + * In particular, it is legal for a process_capture_result to be called + * simultaneously for both a frame N and a frame N+1 as long as the + * above rule holds for gralloc buffers (both input and output). + * * 11. After some time, the framework may stop submitting new requests, wait for * the existing captures to complete (all buffers filled, all results * returned), and then call configure_streams() again. 
This resets the camera @@ -276,13 +354,10 @@ * * android.scaler.cropRegion (controls) * [ignores (x,y), assumes center-zoom] - * android.scaler.availableFormats (static) - * [RAW not supported] - * android.scaler.availableJpegMinDurations (static) - * android.scaler.availableJpegSizes (static) + * android.scaler.availableStreamConfigurations (static) + * android.scaler.availableMinFrameDurations (static) + * android.scaler.availableStallDurations (static) * android.scaler.availableMaxDigitalZoom (static) - * android.scaler.availableProcessedMinDurations (static) - * android.scaler.availableProcessedSizes (static) * [full resolution not supported] * android.scaler.maxDigitalZoom (static) * android.scaler.cropRegion (dynamic) @@ -963,15 +1038,134 @@ * ERROR_BUFFER for each failed buffer. * * In each of these transient failure cases, the HAL must still call - * process_capture_result, with valid output buffer_handle_t. If the result - * metadata could not be produced, it should be NULL. If some buffers could not - * be filled, their sync fences must be set to the error state. + * process_capture_result, with valid output and input (if an input buffer was + * submitted) buffer_handle_t. If the result metadata could not be produced, it + * should be NULL. If some buffers could not be filled, they must be returned with + * process_capture_result in the error state, their release fences must be set to + * the acquire fences passed by the framework, or -1 if they have been waited on by + * the HAL already. * * Invalid input arguments result in -EINVAL from the appropriate methods. In * that case, the framework must act as if that call had never been made. * */ +/** + * S7. Key Performance Indicator (KPI) glossary: + * + * This includes some critical definitions that are used by KPI metrics. 
+ * + * Pipeline Latency: + * For a given capture request, the duration from the framework calling + * process_capture_request to the HAL sending capture result and all buffers + * back by process_capture_result call. To make the Pipeline Latency measure + * independent of frame rate, it is measured by frame count. + * + * For example, when frame rate is 30 (fps), the frame duration (time interval + * between adjacent frame capture time) is 33 (ms). + * If it takes 5 frames for framework to get the result and buffers back for + * a given request, then the Pipeline Latency is 5 (frames), instead of + * 5 x 33 = 165 (ms). + * + * The Pipeline Latency is determined by android.request.pipelineDepth and + * android.request.pipelineMaxDepth, see their definitions for more details. + * + */ + +/** + * S8. Sample Use Cases: + * + * This includes some typical use case examples the camera HAL may support. + * + * S8.1 Zero Shutter Lag (ZSL) with CAMERA3_STREAM_INPUT stream. + * + * When Zero Shutter Lag (ZSL) is supported by the camera device, the INPUT stream + * can be used for application/framework implemented ZSL use case. This kind of stream + * will be used by the framework as follows: + * + * 1. Framework configures an opaque raw format output stream that is used to + * produce the ZSL output buffers. The stream pixel format will be + * HAL_PIXEL_FORMAT_RAW_OPAQUE. + * + * 2. Framework configures an opaque raw format input stream that is used to + * send the reprocess ZSL buffers to the HAL. The stream pixel format will + * also be HAL_PIXEL_FORMAT_RAW_OPAQUE. + * + * 3. Framework configures a YUV/JPEG output stream that is used to receive the + * reprocessed data. The stream pixel format will be YCbCr_420/HAL_PIXEL_FORMAT_BLOB. + * + * 4. Framework picks a ZSL buffer from the output stream when a ZSL capture is + * issued by the application, and sends the data back as an input buffer in a + * reprocessing request, then sends to the HAL for reprocessing. + * + * 5. 
The HAL sends back the output JPEG result to framework. + * + * The HAL can select the actual raw buffer format and configure the ISP pipeline + * appropriately based on the HAL_PIXEL_FORMAT_RAW_OPAQUE format. See this format + * definition for more details. + * + * S8.2 Zero Shutter Lag (ZSL) with CAMERA3_STREAM_BIDIRECTIONAL stream. + * + * For this use case, the bidirectional stream will be used by the framework as follows: + * + * 1. The framework includes a buffer from this stream as output buffer in a + * request as normal. + * + * 2. Once the HAL device returns a filled output buffer to the framework, + * the framework may do one of two things with the filled buffer: + * + * 2. a. The framework uses the filled data, and returns the now-used buffer + * to the stream queue for reuse. This behavior exactly matches the + * OUTPUT type of stream. + * + * 2. b. The framework wants to reprocess the filled data, and uses the + * buffer as an input buffer for a request. Once the HAL device has + * used the reprocessing buffer, it then returns it to the + * framework. The framework then returns the now-used buffer to the + * stream queue for reuse. + * + * 3. The HAL device will be given the buffer again as an output buffer for + * a request at some future point. + * + * For ZSL use case, the pixel format for bidirectional stream will be + * HAL_PIXEL_FORMAT_RAW_OPAQUE if it is listed in + * android.scaler.availableInputOutputFormatsMap. A configuration stream list + * that has BIDIRECTIONAL stream used as input, will usually also have a + * distinct OUTPUT stream to get the reprocessing data. For example, for the + * ZSL use case, the stream list might be configured with the following: + * + * - A HAL_PIXEL_FORMAT_RAW_OPAQUE bidirectional stream is used + * as input. + * - And a HAL_PIXEL_FORMAT_BLOB (JPEG) output stream. + * + */ + +/** + * S9. 
Notes on Controls and Metadata + * + * This section contains notes about the interpretation and usage of various metadata tags. + * + * S9.1 HIGH_QUALITY and FAST modes. + * + * Many camera post-processing blocks may be listed as having HIGH_QUALITY, + * FAST, and OFF operating modes. These blocks will typically also have an + * 'available modes' tag representing which of these operating modes are + * available on a given device. The general policy regarding implementing + * these modes is as follows: + * + * 1. Operating mode controls of hardware blocks that cannot be disabled + * must not list OFF in their corresponding 'available modes' tags. + * + * 2. OFF will always be included in their corresponding 'available modes' + * tag if it is possible to disable that hardware block. + * + * 3. FAST must always be included in the 'available modes' tags for all + * post-processing blocks supported on the device. If a post-processing + * block also has a slower and higher quality operating mode that does + * not meet the framerate requirements for FAST mode, HIGH_QUALITY should + * be included in the 'available modes' tag to represent this operating + * mode. + */ __BEGIN_DECLS struct camera3_device; @@ -1006,6 +1200,21 @@ typedef enum camera3_stream_type { * for reading buffers from this stream and sending them through the camera * processing pipeline, as if the buffer was a newly captured image from the * imager. + * + * The pixel format for input stream can be any format reported by + * android.scaler.availableInputOutputFormatsMap. The pixel format of the + * output stream that is used to produce the reprocessing data may be any + * format reported by android.scaler.availableStreamConfigurations. The + * supported input/output stream combinations depends the camera device + * capabilities, see android.scaler.availableInputOutputFormatsMap for + * stream map details. 
+ * + * This kind of stream is generally used to reprocess data into higher + * quality images (that otherwise would cause a frame rate performance + * loss), or to do off-line reprocessing. + * + * A typical use case is Zero Shutter Lag (ZSL), see S8.1 for more details. + * */ CAMERA3_STREAM_INPUT = 1, @@ -1014,29 +1223,9 @@ typedef enum camera3_stream_type { * used as an output stream, but occasionally one already-filled buffer may * be sent back to the HAL device for reprocessing. * - * This kind of stream is meant generally for zero-shutter-lag features, - * where copying the captured image from the output buffer to the - * reprocessing input buffer would be expensive. The stream will be used by - * the framework as follows: - * - * 1. The framework includes a buffer from this stream as output buffer in a - * request as normal. - * - * 2. Once the HAL device returns a filled output buffer to the framework, - * the framework may do one of two things with the filled buffer: - * - * 2. a. The framework uses the filled data, and returns the now-used buffer - * to the stream queue for reuse. This behavior exactly matches the - * OUTPUT type of stream. - * - * 2. b. The framework wants to reprocess the filled data, and uses the - * buffer as an input buffer for a request. Once the HAL device has - * used the reprocessing buffer, it then returns it to the - * framework. The framework then returns the now-used buffer to the - * stream queue for reuse. - * - * 3. The HAL device will be given the buffer again as an output buffer for - * a request at some future point. + * This kind of stream is meant generally for Zero Shutter Lag (ZSL) + * features, where copying the captured image from the output buffer to the + * reprocessing input buffer would be expensive. See S8.2 for more details. * * Note that the HAL will always be reprocessing data it produced. 
* @@ -1105,9 +1294,17 @@ typedef struct camera3_stream { * gralloc module will select a format based on the usage flags provided by * the camera device and the other endpoint of the stream. * + * <= CAMERA_DEVICE_API_VERSION_3_1: + * * The camera HAL device must inspect the buffers handed to it in the * subsequent register_stream_buffers() call to obtain the * implementation-specific format details, if necessary. + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * register_stream_buffers() won't be called by the framework, so the HAL + * should configure the ISP and sensor pipeline based purely on the sizes, + * usage flags, and formats for the configured streams. */ int format; @@ -1257,6 +1454,14 @@ typedef struct camera3_stream_buffer { * * For input buffers, the HAL must not change the acquire_fence field during * the process_capture_request() call. + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * When the HAL returns an input buffer to the framework with + * process_capture_result(), the acquire_fence must be set to -1. If the HAL + * never waits on input buffer acquire fence due to an error, the sync + * fences should be handled similarly to the way they are handled for output + * buffers. */ int acquire_fence; @@ -1265,10 +1470,25 @@ typedef struct camera3_stream_buffer { * returning buffers to the framework, or write -1 to indicate that no * waiting is required for this buffer. * + * For the output buffers, the fences must be set in the output_buffers + * array passed to process_capture_result(). + * + * <= CAMERA_DEVICE_API_VERSION_3_1: + * * For the input buffer, the release fence must be set by the - * process_capture_request() call. For the output buffers, the fences must - * be set in the output_buffers array passed to process_capture_result(). + * process_capture_request() call. + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * For the input buffer, the fences must be set in the input_buffer + * passed to process_capture_result(). 
+ * + * After signaling the release_fence for this buffer, the HAL + * should not make any further attempts to access this buffer as the + * ownership has been fully transferred back to the framework. * + * If a fence of -1 was specified then the ownership of this buffer + * is transferred back immediately upon the call of process_capture_result. */ int release_fence; @@ -1280,6 +1500,12 @@ typedef struct camera3_stream_buffer { * The complete set of gralloc buffers for a stream. This structure is given to * register_stream_buffers() to allow the camera HAL device to register/map/etc * newly allocated stream buffers. + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * Deprecated (and not used). In particular, + * register_stream_buffers is also deprecated and will never be invoked. + * */ typedef struct camera3_stream_buffer_set { /** @@ -1309,17 +1535,18 @@ typedef struct camera3_stream_buffer_set { * Transport header for compressed JPEG buffers in output streams. * * To capture JPEG images, a stream is created using the pixel format - * HAL_PIXEL_FORMAT_BLOB, and the static metadata field android.jpeg.maxSize is - * used as the buffer size. Since compressed JPEG images are of variable size, - * the HAL needs to include the final size of the compressed image using this - * structure inside the output stream buffer. The JPEG blob ID field must be set - * to CAMERA3_JPEG_BLOB_ID. - * - * Transport header should be at the end of the JPEG output stream buffer. That - * means the jpeg_blob_id must start at byte[android.jpeg.maxSize - - * sizeof(camera3_jpeg_blob)]. Any HAL using this transport header must - * account for it in android.jpeg.maxSize. The JPEG data itself starts at - * the beginning of the buffer and should be jpeg_size bytes long. + * HAL_PIXEL_FORMAT_BLOB. The buffer size for the stream is calculated by the + * framework, based on the static metadata field android.jpeg.maxSize. 
Since + * compressed JPEG images are of variable size, the HAL needs to include the + * final size of the compressed image using this structure inside the output + * stream buffer. The JPEG blob ID field must be set to CAMERA3_JPEG_BLOB_ID. + * + * Transport header should be at the end of the JPEG output stream buffer. That + * means the jpeg_blob_id must start at byte[buffer_size - + * sizeof(camera3_jpeg_blob)], where the buffer_size is the size of gralloc buffer. + * Any HAL using this transport header must account for it in android.jpeg.maxSize + * The JPEG data itself starts at the beginning of the buffer and should be + * jpeg_size bytes long. */ typedef struct camera3_jpeg_blob { uint16_t jpeg_blob_id; @@ -1534,6 +1761,16 @@ typedef enum camera3_request_template { */ CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG = 5, + /** + * A basic template for direct application control of capture + * parameters. All automatic control is disabled (auto-exposure, auto-white + * balance, auto-focus), and post-processing parameters are set to preview + * quality. The manual capture parameters (exposure, sensitivity, etc.) + * are set to reasonable defaults, but should be overridden by the + * application depending on the intended use case. + */ + CAMERA3_TEMPLATE_MANUAL = 6, + /* Total number of templates */ CAMERA3_TEMPLATE_COUNT, @@ -1592,8 +1829,15 @@ typedef struct camera3_capture_request { * The HAL is required to wait on the acquire sync fence of the input buffer * before accessing it. * + * <= CAMERA_DEVICE_API_VERSION_3_1: + * * Any input buffer included here will have been registered with the HAL * through register_stream_buffers() before its inclusion in a request. + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * The buffers will not have been pre-registered with the HAL. + * Subsequent requests may reuse buffers, or provide entirely new buffers. 
*/ camera3_stream_buffer_t *input_buffer; @@ -1606,13 +1850,21 @@ typedef struct camera3_capture_request { /** * An array of num_output_buffers stream buffers, to be filled with image * data from this capture/reprocess. The HAL must wait on the acquire fences - * of each stream buffer before writing to them. All the buffers included - * here will have been registered with the HAL through - * register_stream_buffers() before their inclusion in a request. + * of each stream buffer before writing to them. * * The HAL takes ownership of the actual buffer_handle_t entries in * output_buffers; the framework does not access them until they are * returned in a camera3_capture_result_t. + * + * <= CAMERA_DEVICE_API_VERSION_3_1: + * + * All the buffers included here will have been registered with the HAL + * through register_stream_buffers() before their inclusion in a request. + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * Any or all of the buffers included here may be brand new in this + * request (having never before seen by the HAL). */ const camera3_stream_buffer_t *output_buffers; @@ -1625,7 +1877,9 @@ typedef struct camera3_capture_request { * sent to the framework asynchronously with process_capture_result(), in * response to a single capture request sent to the HAL with * process_capture_request(). Multiple process_capture_result() calls may be - * performed by the HAL for each request. Each call, all with the same frame + * performed by the HAL for each request. + * + * Each call, all with the same frame * number, may contain some subset of the output buffers, and/or the result * metadata. The metadata may only be provided once for a given frame number; * all other calls must set the result metadata to NULL. @@ -1635,6 +1889,29 @@ typedef struct camera3_capture_request { * output buffer may come with a release sync fence that the framework will wait * on before reading, in case the buffer has not yet been filled by the HAL. 
* + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * The metadata may be provided multiple times for a single frame number. The + * framework will accumulate together the final result set by combining each + * partial result together into the total result set. + * + * If an input buffer is given in a request, the HAL must return it in one of + * the process_capture_result calls, and the call may be to just return the input + * buffer, without metadata and output buffers; the sync fences must be handled + * the same way they are done for output buffers. + * + * + * Performance considerations: + * + * Applications will also receive these partial results immediately, so sending + * partial results is a highly recommended performance optimization to avoid + * the total pipeline latency before sending the results for what is known very + * early on in the pipeline. + * + * A typical use case might be calculating the AF state halfway through the + * pipeline; by sending the state back to the framework immediately, we get a + * 50% performance increase and perceived responsiveness of the auto-focus. + * */ typedef struct camera3_capture_result { /** @@ -1657,6 +1934,18 @@ typedef struct camera3_capture_result { * * If there was an error producing the result metadata, result must be an * empty metadata buffer, and notify() must be called with ERROR_RESULT. + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * Multiple calls to process_capture_result() with a given frame_number + * may include the result metadata. + * + * Partial metadata submitted should not include any metadata key returned + * in a previous partial result for a given frame. Each new partial result + * for that frame must also set a distinct partial_result value. + * + * If notify has been called with ERROR_RESULT, all further partial + * results for that frame are ignored by the framework. 
*/ const camera_metadata_t *result; @@ -1690,9 +1979,71 @@ typedef struct camera3_capture_result { * num_output_buffers is zero, this may be NULL. In that case, at least one * more process_capture_result call must be made by the HAL to provide the * output buffers. + * + * When process_capture_result is called with a new buffer for a frame, + * all previous frames' buffers for that corresponding stream must have been + * already delivered (the fences need not have yet been signaled). + * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * Gralloc buffers for a frame may be sent to framework before the + * corresponding SHUTTER-notify. + * + * Performance considerations: + * + * Buffers delivered to the framework will not be dispatched to the + * application layer until a start of exposure timestamp has been received + * via a SHUTTER notify() call. It is highly recommended to + * dispatch that call as early as possible. */ const camera3_stream_buffer_t *output_buffers; + /** + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * The handle for the input stream buffer for this capture. It may not + * yet be consumed at the time the HAL calls process_capture_result(); the + * framework will wait on the release sync fences provided by the HAL before + * reusing the buffer. + * + * The HAL should handle the sync fences the same way they are done for + * output_buffers. + * + * Only one input buffer is allowed to be sent per request. Similarly to + * output buffers, the ordering of returned input buffers must be + * maintained by the HAL. + * + * Performance considerations: + * + * The input buffer should be returned as early as possible. If the HAL + * supports sync fences, it can call process_capture_result to hand it back + * with sync fences being set appropriately. If the sync fences are not + * supported, the buffer can only be returned when it is consumed, which + * may take long time; the HAL may choose to copy this input buffer to make + * the buffer return sooner. 
+ */ + const camera3_stream_buffer_t *input_buffer; + + /** + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * In order to take advantage of partial results, the HAL must set the + * static metadata android.request.partialResultCount to the number of + * partial results it will send for each frame. + * + * Each new capture result with a partial result must set + * this field (partial_result) to a distinct inclusive value between + * 1 and android.request.partialResultCount. + * + * HALs not wishing to take advantage of this feature must not + * set an android.request.partialResultCount or partial_result to a value + * other than 1. + * + * This value must be set to 0 when a capture result contains buffers only + * and no metadata. + */ + uint32_t partial_result; + } camera3_capture_result_t; /********************************************************************** @@ -1768,6 +2119,13 @@ typedef struct camera3_callback_ops { * message. In this case, individual ERROR_RESULT/ERROR_BUFFER messages * should not be sent. * + * Performance requirements: + * + * This is a non-blocking call. The framework will return this call in 5ms. + * + * The pipeline latency (see S7 for definition) should be less than or equal to + * 4 frame intervals, and must be less than or equal to 8 frame intervals. + * */ void (*process_capture_result)(const struct camera3_callback_ops *, const camera3_capture_result_t *result); @@ -1781,11 +2139,25 @@ typedef struct camera3_callback_ops { * with the HAL, and the msg only needs to be valid for the duration of this * call. * + * Multiple threads may call notify() simultaneously. + * + * <= CAMERA_DEVICE_API_VERSION_3_1: + * * The notification for the start of exposure for a given request must be * sent by the HAL before the first call to process_capture_result() for * that request is made. * - * Multiple threads may call notify() simultaneously. 
+ * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * Buffers delivered to the framework will not be dispatched to the + * application layer until a start of exposure timestamp has been received + * via a SHUTTER notify() call. It is highly recommended to + * dispatch this call as early as possible. + * + * ------------------------------------------------------------------------ + * Performance requirements: + * + * This is a non-blocking call. The framework will return this call in 5ms. */ void (*notify)(const struct camera3_callback_ops *, const camera3_notify_msg_t *msg); @@ -1806,6 +2178,11 @@ typedef struct camera3_device_ops { * the HAL. Will be called once after a successful open() call, before any * other functions are called on the camera3_device_ops structure. * + * Performance requirements: + * + * This should be a non-blocking call. The HAL should return from this call + * in 5ms, and must return from this call in 10ms. + * * Return values: * * 0: On successful initialization @@ -1823,6 +2200,8 @@ typedef struct camera3_device_ops { /** * configure_streams: * + * CAMERA_DEVICE_API_VERSION_3_0 only: + * * Reset the HAL camera device processing pipeline and set up new input and * output streams. This call replaces any existing stream configuration with * the streams defined in the stream_list. This method will be called at @@ -1835,16 +2214,19 @@ typedef struct camera3_device_ops { * The stream_list may contain streams that are also in the currently-active * set of streams (from the previous call to configure_stream()). These * streams will already have valid values for usage, max_buffers, and the - * private pointer. If such a stream has already had its buffers registered, + * private pointer. + * + * If such a stream has already had its buffers registered, * register_stream_buffers() will not be called again for the stream, and * buffers from the stream can be immediately included in input requests. 
* * If the HAL needs to change the stream configuration for an existing * stream due to the new configuration, it may rewrite the values of usage - * and/or max_buffers during the configure call. The framework will detect - * such a change, and will then reallocate the stream buffers, and call - * register_stream_buffers() again before using buffers from that stream in - * a request. + * and/or max_buffers during the configure call. + * + * The framework will detect such a change, and will then reallocate the + * stream buffers, and call register_stream_buffers() again before using + * buffers from that stream in a request. * * If a currently-active stream is not included in stream_list, the HAL may * safely remove any references to that stream. It will not be reused in a @@ -1873,6 +2255,115 @@ typedef struct camera3_device_ops { * of (for example) a preview stream, with allocation for other streams * happening later or concurrently. * + * ------------------------------------------------------------------------ + * CAMERA_DEVICE_API_VERSION_3_1 only: + * + * Reset the HAL camera device processing pipeline and set up new input and + * output streams. This call replaces any existing stream configuration with + * the streams defined in the stream_list. This method will be called at + * least once after initialize() before a request is submitted with + * process_capture_request(). + * + * The stream_list must contain at least one output-capable stream, and may + * not contain more than one input-capable stream. + * + * The stream_list may contain streams that are also in the currently-active + * set of streams (from the previous call to configure_stream()). These + * streams will already have valid values for usage, max_buffers, and the + * private pointer. + * + * If such a stream has already had its buffers registered, + * register_stream_buffers() will not be called again for the stream, and + * buffers from the stream can be immediately included in input requests. 
+ * + * If the HAL needs to change the stream configuration for an existing + * stream due to the new configuration, it may rewrite the values of usage + * and/or max_buffers during the configure call. + * + * The framework will detect such a change, and will then reallocate the + * stream buffers, and call register_stream_buffers() again before using + * buffers from that stream in a request. + * + * If a currently-active stream is not included in stream_list, the HAL may + * safely remove any references to that stream. It will not be reused in a + * later configure() call by the framework, and all the gralloc buffers for + * it will be freed after the configure_streams() call returns. + * + * The stream_list structure is owned by the framework, and may not be + * accessed once this call completes. The address of an individual + * camera3_stream_t structure will remain valid for access by the HAL until + * the end of the first configure_stream() call which no longer includes + * that camera3_stream_t in the stream_list argument. The HAL may not change + * values in the stream structure outside of the private pointer, except for + * the usage and max_buffers members during the configure_streams() call + * itself. + * + * If the stream is new, max_buffer, and private pointer fields of the + * stream structure will all be set to 0. The usage will be set to the + * consumer usage flags. The HAL device must set these fields before the + * configure_streams() call returns. These fields are then used by the + * framework and the platform gralloc module to allocate the gralloc + * buffers for each stream. + * + * Before such a new stream can have its buffers included in a capture + * request, the framework will call register_stream_buffers() with that + * stream. However, the framework is not required to register buffers for + * _all_ streams before submitting a request. 
This allows for quick startup + * of (for example) a preview stream, with allocation for other streams + * happening later or concurrently. + * + * ------------------------------------------------------------------------ + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * Reset the HAL camera device processing pipeline and set up new input and + * output streams. This call replaces any existing stream configuration with + * the streams defined in the stream_list. This method will be called at + * least once after initialize() before a request is submitted with + * process_capture_request(). + * + * The stream_list must contain at least one output-capable stream, and may + * not contain more than one input-capable stream. + * + * The stream_list may contain streams that are also in the currently-active + * set of streams (from the previous call to configure_stream()). These + * streams will already have valid values for usage, max_buffers, and the + * private pointer. + * + * If the HAL needs to change the stream configuration for an existing + * stream due to the new configuration, it may rewrite the values of usage + * and/or max_buffers during the configure call. + * + * The framework will detect such a change, and may then reallocate the + * stream buffers before using buffers from that stream in a request. + * + * If a currently-active stream is not included in stream_list, the HAL may + * safely remove any references to that stream. It will not be reused in a + * later configure() call by the framework, and all the gralloc buffers for + * it will be freed after the configure_streams() call returns. + * + * The stream_list structure is owned by the framework, and may not be + * accessed once this call completes. The address of an individual + * camera3_stream_t structure will remain valid for access by the HAL until + * the end of the first configure_stream() call which no longer includes + * that camera3_stream_t in the stream_list argument. 
The HAL may not change + * values in the stream structure outside of the private pointer, except for + * the usage and max_buffers members during the configure_streams() call + * itself. + * + * If the stream is new, max_buffer, and private pointer fields of the + * stream structure will all be set to 0. The usage will be set to the + * consumer usage flags. The HAL device must set these fields before the + * configure_streams() call returns. These fields are then used by the + * framework and the platform gralloc module to allocate the gralloc + * buffers for each stream. + * + * Newly allocated buffers may be included in a capture request at any time + * by the framework. Once a gralloc buffer is returned to the framework + * with process_capture_result (and its respective release_fence has been + * signaled) the framework may free or reuse it at any time. + * + * ------------------------------------------------------------------------ + * * Preconditions: * * The framework will only call this method when no captures are being @@ -1888,7 +2379,7 @@ typedef struct camera3_device_ops { * frame rate given the sizes and formats of the output streams, as * documented in the camera device's static metadata. * - * Performance expectations: + * Performance requirements: * * This call is expected to be heavyweight and possibly take several hundred * milliseconds to complete, since it may require resetting and @@ -1898,6 +2389,9 @@ typedef struct camera3_device_ops { * application operational mode changes (such as switching from still * capture to video recording). * + * The HAL should return from this call in 500ms, and must return from this + * call in 1000ms. + * * Return values: * * 0: On successful stream configuration @@ -1933,6 +2427,12 @@ typedef struct camera3_device_ops { /** * register_stream_buffers: * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * DEPRECATED. This will not be called and must be set to NULL. 
+ * + * <= CAMERA_DEVICE_API_VERSION_3_1: + * * Register buffers for a given stream with the HAL device. This method is * called by the framework after a new stream is defined by * configure_streams, and before buffers from that stream are included in a @@ -1955,6 +2455,11 @@ typedef struct camera3_device_ops { * the camera HAL should inspect the passed-in buffers here to determine any * platform-private pixel format information. * + * Performance requirements: + * + * This should be a non-blocking call. The HAL should return from this call + * in 1ms, and must return from this call in 5ms. + * * Return values: * * 0: On successful registration of the new stream buffers @@ -1992,6 +2497,11 @@ typedef struct camera3_device_ops { * buffer may be returned for subsequent calls for the same template, or for * other templates. * + * Performance requirements: + * + * This should be a non-blocking call. The HAL should return from this call + * in 1ms, and must return from this call in 5ms. + * * Return values: * * Valid metadata: On successful creation of a default settings @@ -2036,6 +2546,22 @@ typedef struct camera3_device_ops { * framework will wait on the sync fence before refilling and reusing the * input buffer. * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * + * The input/output buffers provided by the framework in each request + * may be brand new (having never before seen by the HAL). + * + * ------------------------------------------------------------------------ + * Performance considerations: + * + * Handling a new buffer should be extremely lightweight and there should be + * no frame rate degradation or frame jitter introduced. + * + * This call must return fast enough to ensure that the requested frame + * rate can be sustained, especially for streaming cases (post-processing + * quality settings set to FAST). The HAL should return this call in 1 + * frame interval, and must return from this call in 4 frame intervals. 
+ * * Return values: * * 0: On a successful start to processing the capture request @@ -2071,6 +2597,10 @@ typedef struct camera3_device_ops { * The definition of vendor_tag_query_ops_t can be found in * system/media/camera/include/system/camera_metadata.h. * + * >= CAMERA_DEVICE_API_VERSION_3_2: + * DEPRECATED. This function has been deprecated and should be set to + * NULL by the HAL. Please implement get_vendor_tag_ops in camera_common.h + * instead. */ void (*get_metadata_vendor_tag_ops)(const struct camera3_device*, vendor_tag_query_ops_t* ops); @@ -2084,6 +2614,14 @@ typedef struct camera3_device_ops { * * The passed-in file descriptor can be used to write debugging text using * dprintf() or write(). The text should be in ASCII encoding only. + * + * Performance requirements: + * + * This must be a non-blocking call. The HAL should return from this call + * in 1ms, must return from this call in 10ms. This call must avoid + * deadlocks, as it may be called at any point during camera operation. + * Any synchronization primitives used (such as mutex locks or semaphores) + * should be acquired with a timeout. */ void (*dump)(const struct camera3_device *, int fd); @@ -2095,22 +2633,73 @@ typedef struct camera3_device_ops { * quickly as possible in order to prepare for a configure_streams() call. * * No buffers are required to be successfully returned, so every buffer - * held at the time of flush() (whether sucessfully filled or not) may be + * held at the time of flush() (whether successfully filled or not) may be * returned with CAMERA3_BUFFER_STATUS_ERROR. Note the HAL is still allowed - * to return valid (STATUS_OK) buffers during this call, provided they are - * succesfully filled. + * to return valid (CAMERA3_BUFFER_STATUS_OK) buffers during this call, + * provided they are successfully filled. * * All requests currently in the HAL are expected to be returned as soon as * possible. Not-in-process requests should return errors immediately. 
Any * interruptible hardware blocks should be stopped, and any uninterruptible * blocks should be waited on. * + * More specifically, the HAL must follow below requirements for various cases: + * + * 1. For captures that are too late for the HAL to cancel/stop, and will be + * completed normally by the HAL; i.e. the HAL can send shutter/notify and + * process_capture_result and buffers as normal. + * + * 2. For pending requests that have not done any processing, the HAL must call notify + * CAMERA3_MSG_ERROR_REQUEST, and return all the output buffers with + * process_capture_result in the error state (CAMERA3_BUFFER_STATUS_ERROR). + * The HAL must not place the release fence into an error state, instead, + * the release fences must be set to the acquire fences passed by the framework, + * or -1 if they have been waited on by the HAL already. This is also the path + * to follow for any captures for which the HAL already called notify() with + * CAMERA3_MSG_SHUTTER but won't be producing any metadata/valid buffers for. + * After CAMERA3_MSG_ERROR_REQUEST, for a given frame, only process_capture_results with + * buffers in CAMERA3_BUFFER_STATUS_ERROR are allowed. No further notifys or + * process_capture_result with non-null metadata is allowed. + * + * 3. For partially completed pending requests that will not have all the output + * buffers or perhaps missing metadata, the HAL should follow below: + * + * 3.1. Call notify with CAMERA3_MSG_ERROR_RESULT if some of the expected result + * metadata (i.e. one or more partial metadata) won't be available for the capture. + * + * 3.2. Call notify with CAMERA3_MSG_ERROR_BUFFER for every buffer that won't + * be produced for the capture. + * + * 3.3 Call notify with CAMERA3_MSG_SHUTTER with the capture timestamp before + * any buffers/metadata are returned with process_capture_result. 
+ * + * 3.4 For captures that will produce some results, the HAL must not call + * CAMERA3_MSG_ERROR_REQUEST, since that indicates complete failure. + * + * 3.5. Valid buffers/metadata should be passed to the framework as normal. + * + * 3.6. Failed buffers should be returned to the framework as described for case 2. + * But failed buffers do not have to follow the strict ordering valid buffers do, + * and may be out-of-order with respect to valid buffers. For example, if buffers + * A, B, C, D, E are sent, D and E are failed, then A, E, B, D, C is an acceptable + * return order. + * + * 3.7. For fully-missing metadata, calling CAMERA3_MSG_ERROR_RESULT is sufficient, no + * need to call process_capture_result with NULL metadata or equivalent. + * * flush() should only return when there are no more outstanding buffers or - * requests left in the HAL. The framework may call configure_streams (as + * requests left in the HAL. The framework may call configure_streams (as * the HAL state is now quiesced) or may issue new requests. * - * A flush() call should only take 100ms or less. The maximum time it can - * take is 1 second. + * Note that it's sufficient to only support fully-succeeded and fully-failed result cases. + * However, it is highly desirable to support the partial failure cases as well, as it + * could help improve the flush call overall performance. + * + * Performance requirements: + * + * The HAL should return from this call in 100ms, and must return from this + * call in 1000ms. And this call must not be blocked longer than pipeline + * latency (see S7 for definition). * * Version information: * @@ -2141,6 +2730,13 @@ typedef struct camera3_device { /** * common.version must equal CAMERA_DEVICE_API_VERSION_3_0 to identify this * device as implementing version 3.0 of the camera device HAL. + * + * Performance requirements: + * + * Camera open (common.module->common.methods->open) should return in 200ms, and must return + * in 500ms. 
+ * Camera close (common.close) should return in 200ms, and must return in 500ms. + * */ hw_device_t common; camera3_device_ops_t *ops; diff --git a/include/hardware/camera_common.h b/include/hardware/camera_common.h index 3a1233f..2508022 100644 --- a/include/hardware/camera_common.h +++ b/include/hardware/camera_common.h @@ -24,6 +24,7 @@ #include <sys/types.h> #include <cutils/native_handle.h> #include <system/camera.h> +#include <system/camera_vendor_tags.h> #include <hardware/hardware.h> #include <hardware/gralloc.h> @@ -100,8 +101,9 @@ __BEGIN_DECLS #define CAMERA_DEVICE_API_VERSION_2_1 HARDWARE_DEVICE_API_VERSION(2, 1) #define CAMERA_DEVICE_API_VERSION_3_0 HARDWARE_DEVICE_API_VERSION(3, 0) #define CAMERA_DEVICE_API_VERSION_3_1 HARDWARE_DEVICE_API_VERSION(3, 1) +#define CAMERA_DEVICE_API_VERSION_3_2 HARDWARE_DEVICE_API_VERSION(3, 2) -// Device version 2.x is outdated; device version 3.0 is experimental +// Device version 2.x is outdated; device version 3.x is experimental #define CAMERA_DEVICE_API_VERSION_CURRENT CAMERA_DEVICE_API_VERSION_1_0 /** @@ -251,65 +253,6 @@ typedef struct camera_module_callbacks { } camera_module_callbacks_t; -/** - * Set up vendor-specific tag query methods. These are needed to properly query - * entries with vendor-specified tags, potentially returned by get_camera_info. - * - * This should be used in place of vendor_tag_query_ops, which are deprecated. - */ -typedef struct vendor_tag_ops vendor_tag_ops_t; -struct vendor_tag_ops { - /** - * Get the number of vendor tags supported on this platform. Used to - * calculate the size of buffer needed for holding the array of all tags - * returned by get_all_tags(). - */ - int (*get_tag_count)(const vendor_tag_ops_t *v); - - /** - * Fill an array with all the supported vendor tags on this platform. - * get_tag_count() returns the number of tags supported, and - * tag_array will be allocated with enough space to hold all of the tags. 
- */ - void (*get_all_tags)(const vendor_tag_ops_t *v, uint32_t *tag_array); - - /** - * Get vendor section name for a vendor-specified entry tag. Only called for - * vendor-defined tags. The section name must start with the name of the - * vendor in the Java package style. For example, CameraZoom Inc. must - * prefix their sections with "com.camerazoom." Must return NULL if the tag - * is outside the bounds of vendor-defined sections. - * - * There may be different vendor-defined tag sections, for example the - * phone maker, the chipset maker, and the camera module maker may each - * have their own "com.vendor."-prefixed section. - * - * The memory pointed to by the return value must remain valid for the - * lifetime that the module is loaded, and is owned by the module. - */ - const char *(*get_section_name)(const vendor_tag_ops_t *v, uint32_t tag); - - /** - * Get tag name for a vendor-specified entry tag. Only called for - * vendor-defined tags. Must return NULL if the it is not a vendor-defined - * tag. - * - * The memory pointed to by the return value must remain valid for the - * lifetime that the module is loaded, and is owned by the module. - */ - const char *(*get_tag_name)(const vendor_tag_ops_t *v, uint32_t tag); - - /** - * Get tag type for a vendor-specified entry tag. Only called for tags >= - * 0x80000000. Must return -1 if the tag is outside the bounds of - * vendor-defined sections. - */ - int (*get_tag_type)(const vendor_tag_ops_t *v, uint32_t tag); - - /* reserved for future use */ - void* reserved[8]; -}; - typedef struct camera_module { hw_module_t common; @@ -365,6 +308,9 @@ typedef struct camera_module { * HAL should fill in all the vendor tag operation methods, or leave ops * unchanged if no vendor tags are defined. 
* + * The vendor_tag_ops structure used here is defined in: + * system/media/camera/include/system/vendor_tags.h + * * Version information (based on camera_module_t.common.module_api_version): * * CAMERA_MODULE_API_VERSION_1_x/2_0/2_1: diff --git a/include/hardware/fingerprint.h b/include/hardware/fingerprint.h new file mode 100644 index 0000000..b295ebb --- /dev/null +++ b/include/hardware/fingerprint.h @@ -0,0 +1,127 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ANDROID_INCLUDE_HARDWARE_FINGERPRINT_H +#define ANDROID_INCLUDE_HARDWARE_FINGERPRINT_H + +#define FINGERPRINT_MODULE_API_VERSION_1_0 HARDWARE_MODULE_API_VERSION(1, 0) +#define FINGERPRINT_HARDWARE_MODULE_ID "fingerprint" + +typedef enum fingerprint_msg_type { + FINGERPRINT_ERROR = -1, + FINGERPRINT_SCANNED = 1, + FINGERPRINT_TEMPLATE_ENROLLING = 2, + FINGERPRINT_TEMPLATE_REMOVED = 4 +} fingerprint_msg_type_t; + +typedef enum fingerprint_error { + FINGERPRINT_ERROR_HW_UNAVAILABLE = 1, + FINGERPRINT_ERROR_BAD_CAPTURE = 2, + FINGERPRINT_ERROR_TIMEOUT = 3, + FINGERPRINT_ERROR_NO_SPACE = 4 /* No space available to store a template */ +} fingerprint_error_t; + +typedef struct fingerprint_enroll { + uint32_t id; + /* samples_remaining goes from N (no data collected, but N scans needed) + * to 0 (no more data is needed to build a template) + * If HAL fails to decrement samples_remaining between calls the client + * will declare template collection a failure and should abort the operation + * by calling module->common.methods->close() */ + uint32_t samples_remaining; +} fingerprint_enroll_t; + +typedef struct fingerprint_removed { + uint32_t id; +} fingerprint_removed_t; + +typedef struct fingerprint_scanned { + uint32_t id; /* 0 is a special id and means no match */ + uint32_t confidence; /* Goes form 0 (no match) to 0xffffFFFF (100% sure) */ +} fingerprint_scanned_t; + +typedef struct fingerprint_msg { + fingerprint_msg_type_t type; + union { + uint64_t raw; + fingerprint_error_t error; + fingerprint_enroll_t enroll; + fingerprint_removed_t removed; + fingerprint_scanned_t scan; + } data; +} fingerprint_msg_t; + +/* Callback function type */ +typedef void (*fingerprint_notify_t)(fingerprint_msg_t msg); + +/* Synchronous operation */ +typedef struct fingerprint_device { + struct hw_device_t common; + + /* + * Fingerprint enroll request: + * Switches the HAL state machine to collect and store a new fingerprint + * template. 
Switches back as soon as enroll is complete + * (fingerprint_msg.type == FINGERPRINT_TEMPLATE_ENROLLING && + * fingerprint_msg.data.enroll.samples_remaining == 0) + * or after timeout_sec seconds. + * + * Function return: 0 if enrollment process can be successfully started + * -1 otherwise. A notify() function may be called + * indicating the error condition. + */ + int (*enroll)(struct fingerprint_device *dev, uint32_t timeout_sec); + + /* + * Fingerprint remove request: + * deletes a fingerprint template. + * If the fingerprint id is 0 the entire template database will be removed. + * notify() will be called for each template deleted with + * fingerprint_msg.type == FINGERPRINT_TEMPLATE_REMOVED and + * fingerprint_msg.data.removed.id indicating each template id removed. + * + * Function return: 0 if fingerprint template(s) can be successfully deleted + * -1 otherwise. + */ + int (*remove)(struct fingerprint_device *dev, uint32_t fingerprint_id); + + /* + * Set notification callback: + * Registers a user function that would receive notifications from the HAL + * The call will block if the HAL state machine is in busy state until HAL + * leaves the busy state. + * + * Function return: 0 if callback function is successfuly registered + * -1 otherwise. + */ + int (*set_notify)(struct fingerprint_device *dev, + fingerprint_notify_t notify); + + /* + * Client provided callback function to receive notifications. + * Do not set by hand, use the function above instead. + */ + fingerprint_notify_t notify; + + /* Reserved for future use. Must be NULL. 
*/ + void* reserved[8 - 4]; +} fingerprint_device_t; + +typedef struct fingerprint_module { + struct hw_module_t common; +} fingerprint_module_t; + +#endif /* ANDROID_INCLUDE_HARDWARE_FINGERPRINT_H */ diff --git a/include/hardware/gps.h b/include/hardware/gps.h index 458b5b4..4167793 100644 --- a/include/hardware/gps.h +++ b/include/hardware/gps.h @@ -221,6 +221,11 @@ typedef uint16_t AGpsStatusValue; #define AGPS_INTERFACE "agps" /** + * Name of the Supl Certificate interface. + */ +#define SUPL_CERTIFICATE_INTERFACE "supl-certificate" + +/** * Name for NI interface */ #define GPS_NI_INTERFACE "gps-ni" @@ -507,7 +512,7 @@ typedef struct { */ void (*init)( AGpsCallbacks* callbacks ); /** - * Notifies that a data connection is available and sets + * Notifies that a data connection is available and sets * the name of the APN to be used for SUPL. */ int (*data_conn_open)( const char* apn ); @@ -516,7 +521,7 @@ typedef struct { */ int (*data_conn_closed)(); /** - * Notifies that a data connection is not available for AGPS. + * Notifies that a data connection is not available for AGPS. */ int (*data_conn_failed)(); /** @@ -525,6 +530,72 @@ typedef struct { int (*set_server)( AGpsType type, const char* hostname, int port ); } AGpsInterface; +/** Error codes associated with certificate operations */ +#define AGPS_CERTIFICATE_OPERATION_SUCCESS 0 +#define AGPS_CERTIFICATE_ERROR_GENERIC -100 +#define AGPS_CERTIFICATE_ERROR_TOO_MANY_CERTIFICATES -101 + +/** A data structure that represents an X.509 certificate using DER encoding */ +typedef struct { + size_t length; + u_char* data; +} DerEncodedCertificate; + +/** + * A type definition for SHA1 Fingerprints used to identify X.509 Certificates + * The Fingerprint is a digest of the DER Certificate that uniquely identifies it. 
+ */ +typedef struct { + u_char data[20]; +} Sha1CertificateFingerprint; + +/** AGPS Inteface to handle SUPL certificate operations */ +typedef struct { + /** set to sizeof(SuplCertificateInterface) */ + size_t size; + + /** + * Installs a set of Certificates used for SUPL connections to the AGPS server. + * If needed the HAL should find out internally any certificates that need to be removed to + * accommodate the certificates to install. + * The certificates installed represent a full set of valid certificates needed to connect to + * AGPS SUPL servers. + * The list of certificates is required, and all must be available at the same time, when trying + * to establish a connection with the AGPS Server. + * + * Parameters: + * certificates - A pointer to an array of DER encoded certificates that are need to be + * installed in the HAL. + * length - The number of certificates to install. + * Returns: + * AGPS_CERTIFICATE_OPERATION_SUCCESS if the operation is completed successfully + * AGPS_CERTIFICATE_ERROR_TOO_MANY_CERTIFICATES if the HAL cannot store the number of + * certificates attempted to be installed, the state of the certificates stored should + * remain the same as before on this error case. + * + * IMPORTANT: + * If needed the HAL should find out internally the set of certificates that need to be + * removed to accommodate the certificates to install. + */ + int (*install_certificates) ( const DerEncodedCertificate* certificates, size_t length ); + + /** + * Notifies the HAL that a list of certificates used for SUPL connections are revoked. It is + * expected that the given set of certificates is removed from the internal store of the HAL. + * + * Parameters: + * fingerprints - A pointer to an array of SHA1 Fingerprints to identify the set of + * certificates to revoke. + * length - The number of fingerprints provided. + * Returns: + * AGPS_CERTIFICATE_OPERATION_SUCCESS if the operation is completed successfully. 
+ * + * IMPORTANT: + * If any of the certificates provided (through its fingerprint) is not known by the HAL, + * it should be ignored and continue revoking/deleting the rest of them. + */ + int (*revoke_certificates) ( const Sha1CertificateFingerprint* fingerprints, size_t length ); +} SuplCertificateInterface; /** Represents an NI request */ typedef struct { diff --git a/include/hardware/hardware.h b/include/hardware/hardware.h index 416ae39..74f57aa 100644 --- a/include/hardware/hardware.h +++ b/include/hardware/hardware.h @@ -144,8 +144,12 @@ typedef struct hw_module_t { /** module's dso */ void* dso; +#ifdef __LP64__ + uint64_t reserved[32-7]; +#else /** padding to 128 bytes, reserved for future use */ uint32_t reserved[32-7]; +#endif } hw_module_t; @@ -186,7 +190,11 @@ typedef struct hw_device_t { struct hw_module_t* module; /** padding reserved for future use */ +#ifdef __LP64__ + uint64_t reserved[12]; +#else uint32_t reserved[12]; +#endif /** Close this device */ int (*close)(struct hw_device_t* device); diff --git a/include/hardware/hwcomposer.h b/include/hardware/hwcomposer.h index 86479d3..afb4e99 100644 --- a/include/hardware/hwcomposer.h +++ b/include/hardware/hwcomposer.h @@ -121,6 +121,26 @@ typedef struct hwc_layer_1 { * that the layer will be handled by the HWC (ie: it must not be * composited with OpenGL ES). * + * + * HWC_SIDEBAND + * Set by the caller before calling (*prepare)(), this value indicates + * the contents of this layer come from a sideband video stream. + * + * The h/w composer is responsible for receiving new image buffers from + * the stream at the appropriate time (e.g. synchronized to a separate + * audio stream), compositing them with the current contents of other + * layers, and displaying the resulting image. This happens + * independently of the normal prepare/set cycle. The prepare/set calls + * only happen when other layers change, or when properties of the + * sideband layer such as position or size change. 
+ * + * If the h/w composer can't handle the layer as a sideband stream for + * some reason (e.g. unsupported scaling/blending/rotation, or too many + * sideband layers) it can set compositionType to HWC_FRAMEBUFFER in + * (*prepare)(). However, doing so will result in the layer being shown + * as a solid color since the platform is not currently able to composite + * sideband layers with the GPU. This may be improved in future + * versions of the platform. */ int32_t compositionType; @@ -141,13 +161,21 @@ typedef struct hwc_layer_1 { hwc_color_t backgroundColor; struct { - /* handle of buffer to compose. This handle is guaranteed to have been - * allocated from gralloc using the GRALLOC_USAGE_HW_COMPOSER usage flag. If - * the layer's handle is unchanged across two consecutive prepare calls and - * the HWC_GEOMETRY_CHANGED flag is not set for the second call then the - * HWComposer implementation may assume that the contents of the buffer have - * not changed. */ - buffer_handle_t handle; + union { + /* When compositionType is HWC_FRAMEBUFFER, HWC_OVERLAY, + * HWC_FRAMEBUFFER_TARGET, this is the handle of the buffer to + * compose. This handle is guaranteed to have been allocated + * from gralloc using the GRALLOC_USAGE_HW_COMPOSER usage flag. + * If the layer's handle is unchanged across two consecutive + * prepare calls and the HWC_GEOMETRY_CHANGED flag is not set + * for the second call then the HWComposer implementation may + * assume that the contents of the buffer have not changed. */ + buffer_handle_t handle; + + /* When compositionType is HWC_SIDEBAND, this is the handle + * of the sideband video stream to compose. */ + const native_handle_t* sidebandStream; + }; /* transformation to apply to the buffer during composition */ uint32_t transform; @@ -191,6 +219,10 @@ typedef struct hwc_layer_1 { * reads from them are complete before the framebuffer is ready for * display. 
* + * HWC_SIDEBAND layers will never have an acquire fence, since + * synchronization is handled through implementation-defined + * sideband mechanisms. + * * The HWC takes ownership of the acquireFenceFd and is responsible * for closing it when no longer needed. */ @@ -214,6 +246,10 @@ typedef struct hwc_layer_1 { * produce a release fence for them. The releaseFenceFd will be -1 * for these layers when set() is called. * + * Since HWC_SIDEBAND buffers don't pass through the HWC client, + * the HWC shouldn't produce a release fence for them. The + * releaseFenceFd will be -1 for these layers when set() is called. + * * The HWC client taks ownership of the releaseFenceFd and is * responsible for closing it when no longer needed. */ diff --git a/include/hardware/hwcomposer_defs.h b/include/hardware/hwcomposer_defs.h index c69a4bc..242e3f6 100644 --- a/include/hardware/hwcomposer_defs.h +++ b/include/hardware/hwcomposer_defs.h @@ -36,6 +36,7 @@ __BEGIN_DECLS #define HWC_DEVICE_API_VERSION_1_1 HARDWARE_DEVICE_API_VERSION_2(1, 1, HWC_HEADER_VERSION) #define HWC_DEVICE_API_VERSION_1_2 HARDWARE_DEVICE_API_VERSION_2(1, 2, HWC_HEADER_VERSION) #define HWC_DEVICE_API_VERSION_1_3 HARDWARE_DEVICE_API_VERSION_2(1, 3, HWC_HEADER_VERSION) +#define HWC_DEVICE_API_VERSION_1_4 HARDWARE_DEVICE_API_VERSION_2(1, 4, HWC_HEADER_VERSION) enum { /* hwc_composer_device_t::set failed in EGL */ @@ -95,6 +96,10 @@ enum { /* this layer holds the result of compositing the HWC_FRAMEBUFFER layers. * Added in HWC_DEVICE_API_VERSION_1_1. */ HWC_FRAMEBUFFER_TARGET = 3, + + /* this layer's contents are taken from a sideband buffer stream. + * Added in HWC_DEVICE_API_VERSION_1_4. 
*/ + HWC_SIDEBAND = 4, }; /* diff --git a/include/hardware/nfc_tag.h b/include/hardware/nfc_tag.h new file mode 100644 index 0000000..72028f4 --- /dev/null +++ b/include/hardware/nfc_tag.h @@ -0,0 +1,83 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_NFC_TAG_HAL_INTERFACE_H +#define ANDROID_NFC_TAG_HAL_INTERFACE_H + +#include <stdint.h> + +#include <hardware/hardware.h> + +__BEGIN_DECLS + +/* + * HAL for programmable NFC tags. + * + */ + +#define NFC_TAG_HARDWARE_MODULE_ID "nfc_tag" +#define NFC_TAG_ID "tag" + +typedef struct nfc_tag_module_t { + struct hw_module_t common; +} nfc_tag_module_t; + +typedef struct nfc_tag_device { + struct hw_device_t common; + + /** + * Initialize the NFC tag. + * + * The driver must: + * * Set the static lock bytes to read only + * * Configure the Capability Container to disable write acess + * eg: 0xE1 0x10 <size> 0x0F + * + * This function is called once before any calls to setContent(). + * + * Return 0 on success or -errno on error. + */ + int (*init)(const struct nfc_tag_device *dev); + + /** + * Set the NFC tag content. + * + * The driver must write <data> in the data area of the tag starting at + * byte 0 of block 4 and zero the rest of the data area. + * + * Returns 0 on success or -errno on error. 
+ */ + int (*setContent)(const struct nfc_tag_device *dev, const uint8_t *data, size_t len); + + /** + * Returns the memory size of the data area. + */ + int (*getMemorySize)(const struct nfc_tag_device *dev); +} nfc_tag_device_t; + +static inline int nfc_tag_open(const struct hw_module_t* module, + nfc_tag_device_t** dev) { + return module->methods->open(module, NFC_TAG_ID, + (struct hw_device_t**)dev); +} + +static inline int nfc_tag_close(nfc_tag_device_t* dev) { + return dev->common.close(&dev->common); +} + +__END_DECLS + +#endif // ANDROID_NFC_TAG_HAL_INTERFACE_H diff --git a/include/hardware/power.h b/include/hardware/power.h index 89d57ed..dc33705 100644 --- a/include/hardware/power.h +++ b/include/hardware/power.h @@ -44,7 +44,8 @@ typedef enum { * KLP. */ POWER_HINT_VIDEO_ENCODE = 0x00000003, - POWER_HINT_VIDEO_DECODE = 0x00000004 + POWER_HINT_VIDEO_DECODE = 0x00000004, + POWER_HINT_LOW_POWER = 0x00000005 } power_hint_t; /** @@ -112,6 +113,13 @@ typedef struct power_module { * and it may be appropriate to raise speeds of CPU, memory bus, * etc. The data parameter is unused. * + * POWER_HINT_LOW_POWER + * + * Low power mode is activated or deactivated. Low power mode + * is intended to save battery at the cost of performance. The data + * parameter is non-zero when low power mode is activated, and zero + * when deactivated. + * * A particular platform may choose to ignore any hint. 
* * availability: version 0.2 diff --git a/include/hardware/sensors.h b/include/hardware/sensors.h index ef86a40..9327c41 100644 --- a/include/hardware/sensors.h +++ b/include/hardware/sensors.h @@ -34,6 +34,13 @@ __BEGIN_DECLS #define SENSORS_DEVICE_API_VERSION_1_0 HARDWARE_DEVICE_API_VERSION_2(1, 0, SENSORS_HEADER_VERSION) #define SENSORS_DEVICE_API_VERSION_1_1 HARDWARE_DEVICE_API_VERSION_2(1, 1, SENSORS_HEADER_VERSION) #define SENSORS_DEVICE_API_VERSION_1_2 HARDWARE_DEVICE_API_VERSION_2(1, 2, SENSORS_HEADER_VERSION) +#define SENSORS_DEVICE_API_VERSION_1_3 HARDWARE_DEVICE_API_VERSION_2(1, 3, SENSORS_HEADER_VERSION) + +/** + * Please see the Sensors section of source.android.com for an + * introduction to and detailed descriptions of Android sensor types: + * http://source.android.com/devices/sensors/index.html + */ /** * The id of this module @@ -57,9 +64,12 @@ __BEGIN_DECLS /* + * **** Deprecated ***** * flags for (*batch)() * Availability: SENSORS_DEVICE_API_VERSION_1_0 - * see (*batch)() documentation for details + * see (*batch)() documentation for details. + * Deprecated as of SENSORS_DEVICE_API_VERSION_1_3. + * WAKE_UP_* sensors replace WAKE_UPON_FIFO_FULL concept. */ enum { SENSORS_BATCH_DRY_RUN = 0x00000001, @@ -82,74 +92,25 @@ enum { */ #define SENSOR_PERMISSION_BODY_SENSORS "android.permission.BODY_SENSORS" -/** - * Definition of the axis used by the sensor HAL API - * - * This API is relative to the screen of the device in its default orientation, - * that is, if the device can be used in portrait or landscape, this API - * is only relative to the NATURAL orientation of the screen. In other words, - * the axis are not swapped when the device's screen orientation changes. - * Higher level services /may/ perform this transformation. 
- * - * x<0 x>0 - * ^ - * | - * +-----------+--> y>0 - * | | - * | | - * | | - * | | / z<0 - * | | / - * | | / - * O-----------+/ - * |[] [ ] []/ - * +----------/+ y<0 - * / - * / - * |/ z>0 (toward the sky) - * - * O: Origin (x=0,y=0,z=0) - * - */ - /* - * Interaction with suspend mode - * - * Unless otherwise noted, an enabled sensor shall not prevent the - * SoC to go into suspend mode. It is the responsibility of applications - * to keep a partial wake-lock should they wish to receive sensor - * events while the screen is off. While in suspend mode, and unless - * otherwise noted (batch mode, sensor particularities, ...), enabled sensors' - * events are lost. - * - * Note that conceptually, the sensor itself is not de-activated while in - * suspend mode -- it's just that the data it returns are lost. As soon as - * the SoC gets out of suspend mode, operations resume as usual. Of course, - * in practice sensors shall be disabled while in suspend mode to - * save power, unless batch mode is active, in which case they must - * continue fill their internal FIFO (see the documentation of batch() to - * learn how suspend interacts with batch mode). - * - * In batch mode, and only when the flag SENSORS_BATCH_WAKE_UPON_FIFO_FULL is - * set and supported, the specified sensor must be able to wake-up the SoC and - * be able to buffer at least 10 seconds worth of the requested sensor events. - * - * There are notable exceptions to this behavior, which are sensor-dependent - * (see sensor types definitions below) - * - * - * The sensor type documentation below specifies the wake-up behavior of - * each sensor: - * wake-up: yes this sensor must wake-up the SoC to deliver events - * wake-up: no this sensor shall not wake-up the SoC, events are dropped - * + * Availability: SENSORS_DEVICE_API_VERSION_1_3 + * Sensor flags used in sensor_t.flags. */ +enum { + /* + * Whether this sensor wakes up the AP from suspend mode when data is available. 
+ */ + SENSOR_FLAG_WAKE_UP = 1U << 0 +}; /* * Sensor type * * Each sensor has a type which defines what this sensor measures and how - * measures are reported. All types are defined below. + * measures are reported. See the Base sensors and Composite sensors lists + * for complete descriptions: + * http://source.android.com/devices/sensors/base_triggers.html + * http://source.android.com/devices/sensors/composite_sensors.html * * Device manufacturers (OEMs) can define their own sensor types, for * their private use by applications or services provided by them. Such @@ -196,47 +157,6 @@ enum { #define SENSOR_TYPE_DEVICE_PRIVATE_BASE 0x10000 /* - * Sensor fusion and virtual sensors - * - * Many sensor types are or can be implemented as virtual sensors from - * physical sensors on the device. For instance the rotation vector sensor, - * orientation sensor, step-detector, step-counter, etc... - * - * From the point of view of this API these virtual sensors MUST appear as - * real, individual sensors. It is the responsibility of the driver and HAL - * to make sure this is the case. - * - * In particular, all sensors must be able to function concurrently. - * For example, if defining both an accelerometer and a step counter, - * then both must be able to work concurrently. - */ - -/* - * Trigger modes - * - * Sensors can report events in different ways called trigger modes, - * each sensor type has one and only one trigger mode associated to it. - * Currently there are four trigger modes defined: - * - * continuous: events are reported at a constant rate defined by setDelay(). - * eg: accelerometers, gyroscopes. - * on-change: events are reported only if the sensor's value has changed. - * setDelay() is used to set a lower limit to the reporting - * period (minimum time between two events). - * The HAL must return an event immediately when an on-change - * sensor is activated. 
- * eg: proximity, light sensors - * one-shot: upon detection of an event, the sensor deactivates itself and - * then sends a single event. Order matters to avoid race - * conditions. No other event is sent until the sensor get - * reactivated. setDelay() is ignored. - * eg: significant motion sensor - * special: see details in the sensor type specification below - * - */ - - -/* * SENSOR_TYPE_META_DATA * trigger-mode: n/a * wake-up sensor: n/a @@ -277,30 +197,6 @@ enum { * All values are in SI units (m/s^2) and measure the acceleration of the * device minus the force of gravity. * - * Acceleration sensors return sensor events for all 3 axes at a constant - * rate defined by setDelay(). - * - * x: Acceleration on the x-axis - * y: Acceleration on the y-axis - * z: Acceleration on the z-axis - * - * Note that the readings from the accelerometer include the acceleration - * due to gravity (which is opposite to the direction of the gravity vector). - * - * Examples: - * The norm of <x, y, z> should be close to 0 when in free fall. - * - * When the device lies flat on a table and is pushed on its left side - * toward the right, the x acceleration value is positive. - * - * When the device lies flat on a table, the acceleration value is +9.81, - * which correspond to the acceleration of the device (0 m/s^2) minus the - * force of gravity (-9.81 m/s^2). - * - * When the device lies flat on a table and is pushed toward the sky, the - * acceleration value is greater than +9.81, which correspond to the - * acceleration of the device (+A m/s^2) minus the force of - * gravity (-9.81 m/s^2). */ #define SENSOR_TYPE_ACCELEROMETER (1) #define SENSOR_STRING_TYPE_ACCELEROMETER "android.sensor.accelerometer" @@ -313,12 +209,6 @@ enum { * All values are in micro-Tesla (uT) and measure the geomagnetic * field in the X, Y and Z axis. 
* - * Returned values include calibration mechanisms such that the vector is - * aligned with the magnetic declination and heading of the earth's - * geomagnetic field. - * - * Magnetic Field sensors return sensor events for all 3 axes at a constant - * rate defined by setDelay(). */ #define SENSOR_TYPE_GEOMAGNETIC_FIELD (2) #define SENSOR_TYPE_MAGNETIC_FIELD SENSOR_TYPE_GEOMAGNETIC_FIELD @@ -328,39 +218,11 @@ enum { * SENSOR_TYPE_ORIENTATION * trigger-mode: continuous * wake-up sensor: no - * + * * All values are angles in degrees. - * + * * Orientation sensors return sensor events for all 3 axes at a constant * rate defined by setDelay(). - * - * azimuth: angle between the magnetic north direction and the Y axis, around - * the Z axis (0<=azimuth<360). - * 0=North, 90=East, 180=South, 270=West - * - * pitch: Rotation around X axis (-180<=pitch<=180), with positive values when - * the z-axis moves toward the y-axis. - * - * roll: Rotation around Y axis (-90<=roll<=90), with positive values when - * the x-axis moves towards the z-axis. - * - * Note: For historical reasons the roll angle is positive in the clockwise - * direction (mathematically speaking, it should be positive in the - * counter-clockwise direction): - * - * Z - * ^ - * (+roll) .--> | - * / | - * | | roll: rotation around Y axis - * X <-------(.) - * Y - * note that +Y == -roll - * - * - * - * Note: This definition is different from yaw, pitch and roll used in aviation - * where the X axis is along the long side of the plane (tail to nose). */ #define SENSOR_TYPE_ORIENTATION (3) #define SENSOR_STRING_TYPE_ORIENTATION "android.sensor.orientation" @@ -371,17 +233,7 @@ enum { * wake-up sensor: no * * All values are in radians/second and measure the rate of rotation - * around the X, Y and Z axis. The coordinate system is the same as is - * used for the acceleration sensor. Rotation is positive in the - * counter-clockwise direction (right-hand rule). 
That is, an observer - * looking from some positive location on the x, y or z axis at a device - * positioned on the origin would report positive rotation if the device - * appeared to be rotating counter clockwise. Note that this is the - * standard mathematical definition of positive rotation and does not agree - * with the definition of roll given earlier. - * The range should at least be 17.45 rad/s (ie: ~1000 deg/s). - * - * automatic gyro-drift compensation is allowed but not required. + * around the X, Y and Z axis. */ #define SENSOR_TYPE_GYROSCOPE (4) #define SENSOR_STRING_TYPE_GYROSCOPE "android.sensor.gyroscope" @@ -413,12 +265,9 @@ enum { /* * SENSOR_TYPE_PROXIMITY * trigger-mode: on-change - * wake-up sensor: yes + * wake-up sensor: yes (set SENSOR_FLAG_WAKE_UP flag) * - * The distance value is measured in centimeters. Note that some proximity - * sensors only support a binary "close" or "far" measurement. In this case, - * the sensor should report its maxRange value in the "far" state and a value - * less than maxRange in the "near" state. + * The value corresponds to the distance to the nearest object in centimeters. */ #define SENSOR_TYPE_PROXIMITY (8) #define SENSOR_STRING_TYPE_PROXIMITY "android.sensor.proximity" @@ -429,10 +278,7 @@ enum { * wake-up sensor: no * * A gravity output indicates the direction of and magnitude of gravity in - * the devices's coordinates. On Earth, the magnitude is 9.8 m/s^2. - * Units are m/s^2. The coordinate system is the same as is used for the - * acceleration sensor. When the device is at rest, the output of the - * gravity sensor should be identical to that of the accelerometer. + * the devices's coordinates. */ #define SENSOR_TYPE_GRAVITY (9) #define SENSOR_STRING_TYPE_GRAVITY "android.sensor.gravity" @@ -444,13 +290,6 @@ enum { * * Indicates the linear acceleration of the device in device coordinates, * not including gravity. 
- * - * The output is conceptually: - * output of TYPE_ACCELERATION - output of TYPE_GRAVITY - * - * Readings on all axes should be close to 0 when device lies on a table. - * Units are m/s^2. - * The coordinate system is the same as is used for the acceleration sensor. */ #define SENSOR_TYPE_LINEAR_ACCELERATION (10) #define SENSOR_STRING_TYPE_LINEAR_ACCELERATION "android.sensor.linear_acceleration" @@ -462,46 +301,7 @@ enum { * wake-up sensor: no * * The rotation vector symbolizes the orientation of the device relative to the - * East-North-Up coordinates frame. It is usually obtained by integration of - * accelerometer, gyroscope and magnetometer readings. - * - * The East-North-Up coordinate system is defined as a direct orthonormal basis - * where: - * - X points east and is tangential to the ground. - * - Y points north and is tangential to the ground. - * - Z points towards the sky and is perpendicular to the ground. - * - * The orientation of the phone is represented by the rotation necessary to - * align the East-North-Up coordinates with the phone's coordinates. That is, - * applying the rotation to the world frame (X,Y,Z) would align them with the - * phone coordinates (x,y,z). - * - * The rotation can be seen as rotating the phone by an angle theta around - * an axis rot_axis to go from the reference (East-North-Up aligned) device - * orientation to the current device orientation. - * - * The rotation is encoded as the 4 (reordered) components of a unit quaternion: - * sensors_event_t.data[0] = rot_axis.x*sin(theta/2) - * sensors_event_t.data[1] = rot_axis.y*sin(theta/2) - * sensors_event_t.data[2] = rot_axis.z*sin(theta/2) - * sensors_event_t.data[3] = cos(theta/2) - * where - * - rot_axis.x,y,z are the North-East-Up coordinates of a unit length vector - * representing the rotation axis - * - theta is the rotation angle - * - * The quaternion must be of norm 1 (it is a unit quaternion). Failure to ensure - * this will cause erratic client behaviour. 
- * - * In addition, this sensor reports an estimated heading accuracy. - * sensors_event_t.data[4] = estimated_accuracy (in radians) - * The heading error must be less than estimated_accuracy 95% of the time - * - * This sensor must use a gyroscope and an accelerometer as main orientation - * change input. - * - * This sensor can also include magnetometer input to make up for gyro drift, - * but it cannot be implemented using only a magnetometer. + * East-North-Up coordinates frame. */ #define SENSOR_TYPE_ROTATION_VECTOR (11) #define SENSOR_STRING_TYPE_ROTATION_VECTOR "android.sensor.rotation_vector" @@ -534,35 +334,6 @@ enum { * * Similar to SENSOR_TYPE_MAGNETIC_FIELD, but the hard iron calibration is * reported separately instead of being included in the measurement. - * Factory calibration and temperature compensation should still be applied to - * the "uncalibrated" measurement. - * Separating away the hard iron calibration estimation allows the system to - * better recover from bad hard iron estimation. - * - * All values are in micro-Tesla (uT) and measure the ambient magnetic - * field in the X, Y and Z axis. Assumptions that the the magnetic field - * is due to the Earth's poles should be avoided. - * - * The uncalibrated_magnetic event contains - * - 3 fields for uncalibrated measurement: x_uncalib, y_uncalib, z_uncalib. - * Each is a component of the measured magnetic field, with soft iron - * and temperature compensation applied, but not hard iron calibration. - * These values should be continuous (no re-calibration should cause a jump). - * - 3 fields for hard iron bias estimates: x_bias, y_bias, z_bias. - * Each field is a component of the estimated hard iron calibration. - * They represent the offsets to apply to the calibrated readings to obtain - * uncalibrated readings (x_uncalib ~= x_calibrated + x_bias) - * These values are expected to jump as soon as the estimate of the hard iron - * changes, and they should be stable the rest of the time. 
- * - * If this sensor is present, then the corresponding - * SENSOR_TYPE_MAGNETIC_FIELD must be present and both must return the - * same sensor_t::name and sensor_t::vendor. - * - * Minimum filtering should be applied to this sensor. In particular, low pass - * filters should be avoided. - * - * See SENSOR_TYPE_MAGNETIC_FIELD for more information */ #define SENSOR_TYPE_MAGNETIC_FIELD_UNCALIBRATED (14) #define SENSOR_STRING_TYPE_MAGNETIC_FIELD_UNCALIBRATED "android.sensor.magnetic_field_uncalibrated" @@ -573,21 +344,7 @@ enum { * wake-up sensor: no * * Similar to SENSOR_TYPE_ROTATION_VECTOR, but not using the geomagnetic - * field. Therefore the Y axis doesn't point north, but instead to some other - * reference. That reference is allowed to drift by the same order of - * magnitude than the gyroscope drift around the Z axis. - * - * This sensor does not report an estimated heading accuracy: - * sensors_event_t.data[4] is reserved and should be set to 0 - * - * In the ideal case, a phone rotated and returning to the same real-world - * orientation should report the same game rotation vector - * (without using the earth's geomagnetic field). - * - * This sensor must be based on a gyroscope. It cannot be implemented using - * a magnetometer. - * - * see SENSOR_TYPE_ROTATION_VECTOR for more details + * field. */ #define SENSOR_TYPE_GAME_ROTATION_VECTOR (15) #define SENSOR_STRING_TYPE_GAME_ROTATION_VECTOR "android.sensor.game_rotation_vector" @@ -598,92 +355,19 @@ enum { * wake-up sensor: no * * All values are in radians/second and measure the rate of rotation - * around the X, Y and Z axis. An estimation of the drift on each axis is - * reported as well. - * - * No gyro-drift compensation shall be performed. - * Factory calibration and temperature compensation should still be applied - * to the rate of rotation (angular speeds). - * - * The coordinate system is the same as is - * used for the acceleration sensor. 
Rotation is positive in the - * counter-clockwise direction (right-hand rule). That is, an observer - * looking from some positive location on the x, y or z axis at a device - * positioned on the origin would report positive rotation if the device - * appeared to be rotating counter clockwise. Note that this is the - * standard mathematical definition of positive rotation and does not agree - * with the definition of roll given earlier. - * The range should at least be 17.45 rad/s (ie: ~1000 deg/s). - * - * Content of an uncalibrated_gyro event: (units are rad/sec) - * x_uncalib : angular speed (w/o drift compensation) around the X axis - * y_uncalib : angular speed (w/o drift compensation) around the Y axis - * z_uncalib : angular speed (w/o drift compensation) around the Z axis - * x_bias : estimated drift around X axis in rad/s - * y_bias : estimated drift around Y axis in rad/s - * z_bias : estimated drift around Z axis in rad/s - * - * IMPLEMENTATION NOTES: - * - * If the implementation is not able to estimate the drift, then this - * sensor MUST NOT be reported by this HAL. Instead, the regular - * SENSOR_TYPE_GYROSCOPE is used without drift compensation. - * - * If this sensor is present, then the corresponding - * SENSOR_TYPE_GYROSCOPE must be present and both must return the - * same sensor_t::name and sensor_t::vendor. + * around the X, Y and Z axis. */ #define SENSOR_TYPE_GYROSCOPE_UNCALIBRATED (16) #define SENSOR_STRING_TYPE_GYROSCOPE_UNCALIBRATED "android.sensor.gyroscope_uncalibrated" - /* * SENSOR_TYPE_SIGNIFICANT_MOTION * trigger-mode: one-shot - * wake-up sensor: yes + * wake-up sensor: yes (set SENSOR_FLAG_WAKE_UP flag) * * A sensor of this type triggers an event each time significant motion * is detected and automatically disables itself. * The only allowed value to return is 1.0. - * - * A significant motion is a motion that might lead to a change in the user - * location. 
- * Examples of such motions are: - * walking, biking, sitting in a moving car, coach or train. - * Examples of situations that should not trigger significant motion: - * - phone in pocket and person is not moving - * - phone is on a table, even if the table shakes a bit due to nearby traffic - * or washing machine - * - * A note on false positive / false negative / power consumption tradeoff - * - The goal of this sensor is to save power. - * - Triggering an event when the user is not moving (false positive) is costly - * in terms of power, so it should be avoided. - * - Not triggering an event when the user is moving (false negative) is - * acceptable as long as it is not done repeatedly. If the user has been - * walking for 10 seconds, not triggering an event within those 10 seconds - * is not acceptable. - * - * IMPORTANT NOTE: this sensor type is very different from other types - * in that it must work when the screen is off without the need of - * holding a partial wake-lock and MUST allow the SoC to go into suspend. - * When significant motion is detected, the sensor must awaken the SoC and - * the event be reported. - * - * If a particular hardware cannot support this mode of operation then this - * sensor type MUST NOT be reported by the HAL. ie: it is not acceptable - * to "emulate" this sensor in the HAL. - * - * The whole point of this sensor type is to save power by keeping the - * SoC in suspend mode when the device is at rest. - * - * When the sensor is not activated, it must also be deactivated in the - * hardware: it must not wake up the SoC anymore, even in case of - * significant motion. - * - * setDelay() has no effect and is ignored. - * Once a "significant motion" event is returned, a sensor of this type - * must disables itself automatically, as if activate(..., 0) had been called. 
*/ #define SENSOR_TYPE_SIGNIFICANT_MOTION (17) @@ -695,21 +379,8 @@ enum { * wake-up sensor: no * * A sensor of this type triggers an event each time a step is taken - * by the user. The only allowed value to return is 1.0 and an event is - * generated for each step. Like with any other event, the timestamp - * indicates when the event (here the step) occurred, this corresponds to when - * the foot hit the ground, generating a high variation in acceleration. - * - * While this sensor operates, it shall not disrupt any other sensors, in - * particular, but not limited to, the accelerometer; which might very well - * be in use as well. - * - * This sensor must be low power. That is, if the step detection cannot be - * done in hardware, this sensor should not be defined. Also, when the - * step detector is activated and the accelerometer is not, only steps should - * trigger interrupts (not accelerometer data). - * - * setDelay() has no impact on this sensor type + * by the user. The only allowed value to return is 1.0 and an event + * is generated for each step. */ #define SENSOR_TYPE_STEP_DETECTOR (18) @@ -724,46 +395,6 @@ enum { * A sensor of this type returns the number of steps taken by the user since * the last reboot while activated. The value is returned as a uint64_t and is * reset to zero only on a system / android reboot. - * - * The timestamp of the event is set to the time when the first step - * for that event was taken. - * See SENSOR_TYPE_STEP_DETECTOR for the signification of the time of a step. - * - * The minimum size of the hardware's internal counter shall be 16 bits - * (this restriction is here to avoid too frequent wake-ups when the - * delay is very large). - * - * IMPORTANT NOTE: this sensor type is different from other types - * in that it must work when the screen is off without the need of - * holding a partial wake-lock and MUST allow the SoC to go into suspend. 
- * Unlike other sensors, while in suspend mode this sensor must stay active, - * no events are reported during that time but, steps continue to be - * accounted for; an event will be reported as soon as the SoC resumes if - * the timeout has expired. - * - * In other words, when the screen is off and the device allowed to - * go into suspend mode, we don't want to be woken up, regardless of the - * setDelay() value, but the steps shall continue to be counted. - * - * The driver must however ensure that the internal step count never - * overflows. It is allowed in this situation to wake the SoC up so the - * driver can do the counter maintenance. - * - * While this sensor operates, it shall not disrupt any other sensors, in - * particular, but not limited to, the accelerometer; which might very well - * be in use as well. - * - * If a particular hardware cannot support these modes of operation then this - * sensor type MUST NOT be reported by the HAL. ie: it is not acceptable - * to "emulate" this sensor in the HAL. - * - * This sensor must be low power. That is, if the step detection cannot be - * done in hardware, this sensor should not be defined. Also, when the - * step counter is activated and the accelerometer is not, only steps should - * trigger interrupts (not accelerometer data). - * - * The whole point of this sensor type is to save power by keeping the - * SoC in suspend mode when the device is at rest. */ #define SENSOR_TYPE_STEP_COUNTER (19) @@ -776,18 +407,6 @@ enum { * * Similar to SENSOR_TYPE_ROTATION_VECTOR, but using a magnetometer instead * of using a gyroscope. - * - * This sensor must be based on a magnetometer. It cannot be implemented using - * a gyroscope, and gyroscope input cannot be used by this sensor, as the - * goal of this sensor is to be low power. - * The accelerometer can be (and usually is) used. 
- * - * Just like SENSOR_TYPE_ROTATION_VECTOR, this sensor reports an estimated - * heading accuracy: - * sensors_event_t.data[4] = estimated_accuracy (in radians) - * The heading error must be less than estimated_accuracy 95% of the time - * - * see SENSOR_TYPE_ROTATION_VECTOR for more details */ #define SENSOR_TYPE_GEOMAGNETIC_ROTATION_VECTOR (20) #define SENSOR_STRING_TYPE_GEOMAGNETIC_ROTATION_VECTOR "android.sensor.geomagnetic_rotation_vector" @@ -806,6 +425,135 @@ enum { #define SENSOR_TYPE_HEART_RATE (21) #define SENSOR_STRING_TYPE_HEART_RATE "android.sensor.heart_rate" +/* + * SENSOR_TYPE_NON_WAKE_UP_PROXIMITY_SENSOR + * Same as proximity_sensor but does not wake up the AP from suspend mode. + * wake-up sensor: no + */ +#define SENSOR_TYPE_NON_WAKE_UP_PROXIMITY_SENSOR (22) +#define SENSOR_STRING_TYPE_NON_WAKE_UP_PROXIMITY_SENSOR "android.sensor.non_wake_up_proximity_sensor" + +/* + * The sensors below are wake_up variants of the base sensor types defined + * above. When registered in batch mode, these sensors will wake up the AP when + * their FIFOs are full or when the batch timeout expires. A separate FIFO has + * to be maintained for wake up sensors and non wake up sensors. The non wake-up + * sensors need to overwrite their FIFOs when they are full till the AP wakes up + * and the wake-up sensors will wake-up the AP when their FIFOs are full or when + * the batch timeout expires without losing events. + * Note: Sensors of type SENSOR_TYPE_PROXIMITY are also wake up sensors and + * should be treated as such. Wake-up one-shot sensors like SIGNIFICANT_MOTION + * cannot be batched, hence the text about batch above doesn't apply to them. + * + * Define these sensors only if: + * 1) batching is supported. + * 2) wake-up and non wake-up variants of each sensor can be activated at + * different rates. + * + * wake-up sensor: yes + * Set SENSOR_FLAG_WAKE_UP flag for all these sensors. 
+ */ +#define SENSOR_TYPE_WAKE_UP_ACCELEROMETER (23) +#define SENSOR_STRING_TYPE_WAKE_UP_ACCELEROMETER "android.sensor.wake_up_accelerometer" + +#define SENSOR_TYPE_WAKE_UP_MAGNETIC_FIELD (24) +#define SENSOR_STRING_TYPE_WAKE_UP_MAGNETIC_FIELD "android.sensor.wake_up_magnetic_field" + +#define SENSOR_TYPE_WAKE_UP_ORIENTATION (25) +#define SENSOR_STRING_TYPE_WAKE_UP_ORIENTATION "android.sensor.wake_up_orientation" + +#define SENSOR_TYPE_WAKE_UP_GYROSCOPE (26) +#define SENSOR_STRING_TYPE_WAKE_UP_GYROSCOPE "android.sensor.wake_up_gyroscope" + +#define SENSOR_TYPE_WAKE_UP_LIGHT (27) +#define SENSOR_STRING_TYPE_WAKE_UP_LIGHT "android.sensor.wake_up_light" + +#define SENSOR_TYPE_WAKE_UP_PRESSURE (28) +#define SENSOR_STRING_TYPE_WAKE_UP_PRESSURE "android.sensor.wake_up_pressure" + +#define SENSOR_TYPE_WAKE_UP_GRAVITY (29) +#define SENSOR_STRING_TYPE_WAKE_UP_GRAVITY "android.sensor.wake_up_gravity" + +#define SENSOR_TYPE_WAKE_UP_LINEAR_ACCELERATION (30) +#define SENSOR_STRING_TYPE_WAKE_UP_LINEAR_ACCELERATION "android.sensor.wake_up_linear_acceleration" + +#define SENSOR_TYPE_WAKE_UP_ROTATION_VECTOR (31) +#define SENSOR_STRING_TYPE_WAKE_UP_ROTATION_VECTOR "android.sensor.wake_up_rotation_vector" + +#define SENSOR_TYPE_WAKE_UP_RELATIVE_HUMIDITY (32) +#define SENSOR_STRING_TYPE_WAKE_UP_RELATIVE_HUMIDITY "android.sensor.wake_up_relative_humidity" + +#define SENSOR_TYPE_WAKE_UP_AMBIENT_TEMPERATURE (33) +#define SENSOR_STRING_TYPE_WAKE_UP_AMBIENT_TEMPERATURE "android.sensor.wake_up_ambient_temperature" + +#define SENSOR_TYPE_WAKE_UP_MAGNETIC_FIELD_UNCALIBRATED (34) +#define SENSOR_STRING_TYPE_WAKE_UP_MAGNETIC_FIELD_UNCALIBRATED "android.sensor.wake_up_magnetic_field_uncalibrated" + +#define SENSOR_TYPE_WAKE_UP_GAME_ROTATION_VECTOR (35) +#define SENSOR_STRING_TYPE_WAKE_UP_GAME_ROTATION_VECTOR "android.sensor.wake_up_game_rotation_vector" + +#define SENSOR_TYPE_WAKE_UP_GYROSCOPE_UNCALIBRATED (36) +#define SENSOR_STRING_TYPE_WAKE_UP_GYROSCOPE_UNCALIBRATED 
"android.sensor.wake_up_gyroscope_uncalibrated" + +#define SENSOR_TYPE_WAKE_UP_STEP_DETECTOR (37) +#define SENSOR_STRING_TYPE_WAKE_UP_STEP_DETECTOR "android.sensor.wake_up_step_detector" + +#define SENSOR_TYPE_WAKE_UP_STEP_COUNTER (38) +#define SENSOR_STRING_TYPE_WAKE_UP_STEP_COUNTER "android.sensor.wake_up_step_counter" + +#define SENSOR_TYPE_WAKE_UP_GEOMAGNETIC_ROTATION_VECTOR (39) +#define SENSOR_STRING_TYPE_WAKE_UP_GEOMAGNETIC_ROTATION_VECTOR "android.sensor.wake_up_geomagnetic_rotation_vector" + +#define SENSOR_TYPE_WAKE_UP_HEART_RATE (40) +#define SENSOR_STRING_TYPE_WAKE_UP_HEART_RATE "android.sensor.wake_up_heart_rate" + +/* + * SENSOR_TYPE_WAKE_UP_TILT_DETECTOR + * trigger-mode: special (setDelay has no impact) + * wake-up sensor: yes (set SENSOR_FLAG_WAKE_UP flag) + * + * A sensor of this type generates an event each time a tilt event is detected. A tilt event + * should be generated if the direction of the 2-seconds window average gravity changed by at least + * 35 degrees since the activation of the sensor. + * initial_estimated_gravity = average of accelerometer measurements over the first + * 1 second after activation. + * current_estimated_gravity = average of accelerometer measurements over the last 2 seconds. + * trigger when angle (initial_estimated_gravity, current_estimated_gravity) > 35 degrees + * + * Large accelerations without a change in phone orientation should not trigger a tilt event. + * For example, a sharp turn or strong acceleration while driving a car should not trigger a tilt + * event, even though the angle of the average acceleration might vary by more than 35 degrees. + * + * Typically, this sensor is implemented with the help of only an accelerometer. Other sensors can + * be used as well if they do not increase the power consumption significantly. This is a low power + * sensor that should allow the AP to go into suspend mode. Do not emulate this sensor in the HAL. 
+ * Like other wake up sensors, the driver is expected to a hold a wake_lock with a timeout of 200 ms + * while reporting this event. The only allowed return value is 1.0. + */ +#define SENSOR_TYPE_WAKE_UP_TILT_DETECTOR (41) +#define SENSOR_STRING_TYPE_WAKE_UP_TILT_DETECTOR "android.sensor.wake_up_tilt_detector" + +/* + * SENSOR_TYPE_WAKE_GESTURE + * trigger-mode: one-shot + * wake-up sensor: yes (set SENSOR_FLAG_WAKE_UP flag) + * + * A sensor enabling waking up the device based on a device specific motion. + * + * When this sensor triggers, the device behaves as if the power button was + * pressed, turning the screen on. This behavior (turning on the screen when + * this sensor triggers) might be deactivated by the user in the device + * settings. Changes in settings do not impact the behavior of the sensor: + * only whether the framework turns the screen on when it triggers. + * + * The actual gesture to be detected is not specified, and can be chosen by + * the manufacturer of the device. + * This sensor must be low power, as it is likely to be activated 24/7. + * The only allowed value to return is 1.0. + */ +#define SENSOR_TYPE_WAKE_GESTURE (42) +#define SENSOR_STRING_TYPE_WAKE_GESTURE "android.sensor.wake_gesture" + /** * Values returned by the accelerometer in various locations in the universe. * all values are in SI units (m/s^2) @@ -952,7 +700,11 @@ typedef struct sensors_event_t { uint64_t step_counter; } u64; }; - uint32_t reserved1[4]; + + /* Reserved flags for internal use. Set to zero. */ + uint32_t flags; + + uint32_t reserved1[3]; } sensors_event_t; @@ -992,7 +744,7 @@ struct sensor_t { * must increase when the driver is updated in a way that changes the * output of this sensor. This is important for fused sensors when the * fusion algorithm is updated. - */ + */ int version; /* handle that identifies this sensors. 
This handle is used to reference @@ -1053,15 +805,40 @@ struct sensor_t { */ const char* requiredPermission; + /* This value is defined only for continuous mode sensors. It is the delay between two + * sensor events corresponding to the lowest frequency that this sensor supports. When + * lower frequencies are requested through batch()/setDelay() the events will be generated + * at this frequency instead. It can be used by the framework or applications to estimate + * when the batch FIFO may be full. + * NOTE: period_ns is in nanoseconds where as maxDelay/minDelay are in microseconds. + * continuous: maximum sampling period allowed in microseconds. + * on-change, one-shot, special : -1 + * Availability: SENSORS_DEVICE_API_VERSION_1_3 + */ + #ifdef __LP64__ + int64_t maxDelay; + #else + int32_t maxDelay; + #endif + + /* Flags for sensor. See SENSOR_FLAG_* above. */ + #ifdef __LP64__ + uint64_t flags; + #else + uint32_t flags; + #endif + /* reserved fields, must be zero */ - void* reserved[4]; + void* reserved[2]; }; /* * sensors_poll_device_t is used with SENSORS_DEVICE_API_VERSION_0_1 * and is present for backward binary and source compatibility. - * (see documentation of the hooks in struct sensors_poll_device_1 below) + * See the Sensors HAL interface section for complete descriptions of the + * following functions: + * http://source.android.com/devices/sensors/index.html#hal */ struct sensors_poll_device_t { struct hw_device_t common; @@ -1086,70 +863,26 @@ typedef struct sensors_poll_device_1 { struct { struct hw_device_t common; - /* Activate/de-activate one sensor. + /* Activate/de-activate one sensor. Return 0 on success, negative * * handle is the handle of the sensor to change. * enabled set to 1 to enable, or 0 to disable the sensor. * - * if enabled is set to 1, the sensor is activated even if - * setDelay() wasn't called before. In this case, a default rate - * should be used. 
- * - * unless otherwise noted in the sensor types definitions, an - * activated sensor never prevents the SoC to go into suspend - * mode; that is, the HAL shall not hold a partial wake-lock on - * behalf of applications. - * - * one-shot sensors de-activate themselves automatically upon - * receiving an event and they must still accept to be deactivated - * through a call to activate(..., ..., 0). - * - * if "enabled" is 1 and the sensor is already activated, this - * function is a no-op and succeeds. - * - * if "enabled" is 0 and the sensor is already de-activated, - * this function is a no-op and succeeds. - * - * return 0 on success, negative errno code otherwise + * Return 0 on success, negative errno code otherwise. */ int (*activate)(struct sensors_poll_device_t *dev, int handle, int enabled); /** - * Set the events's period in nanoseconds for a given sensor. - * - * What the period_ns parameter means depends on the specified - * sensor's trigger mode: - * - * continuous: setDelay() sets the sampling rate. - * on-change: setDelay() limits the delivery rate of events - * one-shot: setDelay() is ignored. it has no effect. - * special: see specific sensor type definitions - * - * For continuous and on-change sensors, if the requested value is - * less than sensor_t::minDelay, then it's silently clamped to - * sensor_t::minDelay unless sensor_t::minDelay is 0, in which - * case it is clamped to >= 1ms. - * - * setDelay will not be called when the sensor is in batching mode. - * In this case, batch() will be called with the new period. - * - * @return 0 if successful, < 0 on error + * Set the events's period in nanoseconds for a given sensor. If + * period_ns > max_delay it will be truncated to max_delay and if + * period_ns < min_delay it will be replaced by min_delay. */ int (*setDelay)(struct sensors_poll_device_t *dev, int handle, int64_t period_ns); /** * Returns an array of sensor data. - * This function must block until events are available. 
- * - * return the number of events read on success, or -errno in case - * of an error. - * - * The number of events returned in data must be less or equal - * to the "count" argument. - * - * This function shall never return 0 (no event). */ int (*poll)(struct sensors_poll_device_t *dev, sensors_event_t* data, int count); @@ -1158,200 +891,9 @@ typedef struct sensors_poll_device_1 { /* - * Enables batch mode for the given sensor and sets the delay between events - * - * A timeout value of zero disables batch mode for the given sensor. - * - * The period_ns parameter is equivalent to calling setDelay() -- this - * function both enables or disables the batch mode AND sets the events's - * period in nanosecond. See setDelay() above for a detailed explanation of - * the period_ns parameter. - * - * BATCH MODE: - * ----------- - * In non-batch mode, all sensor events must be reported as soon as they - * are detected. For example, an accelerometer activated at 50Hz will - * trigger interrupts 50 times per second. - * While in batch mode, sensor events do not need to be reported as soon - * as they are detected. They can be temporarily stored in batches and - * reported in batches, as long as no event is delayed by more than - * "timeout" nanoseconds. That is, all events since the previous batch - * are recorded and returned all at once. This allows to reduce the amount - * of interrupts sent to the SoC, and allow the SoC to switch to a lower - * power state (Idle) while the sensor is capturing and batching data. - * - * setDelay() is not affected and it behaves as usual. - * - * Each event has a timestamp associated with it, the timestamp - * must be accurate and correspond to the time at which the event - * physically happened. - * - * Batching does not modify the behavior of poll(): batches from different - * sensors can be interleaved and split. As usual, all events from the same - * sensor are time-ordered. 
- * - * BEHAVIOUR OUTSIDE OF SUSPEND MODE: - * ---------------------------------- - * - * When the SoC is awake (not in suspend mode), events must be reported in - * batches at least every "timeout". No event shall be dropped or lost. - * If internal h/w FIFOs fill-up before the timeout, then events are - * reported at that point to ensure no event is lost. - * - * - * NORMAL BEHAVIOR IN SUSPEND MODE: - * --------------------------------- - * - * By default, batch mode doesn't significantly change the interaction with - * suspend mode. That is, sensors must continue to allow the SoC to - * go into suspend mode and sensors must stay active to fill their - * internal FIFO. In this mode, when the FIFO fills up, it shall wrap - * around (basically behave like a circular buffer, overwriting events). - * As soon as the SoC comes out of suspend mode, a batch is produced with - * as much as the recent history as possible, and batch operation - * resumes as usual. - * - * The behavior described above allows applications to record the recent - * history of a set of sensor while keeping the SoC into suspend. It - * also allows the hardware to not have to rely on a wake-up interrupt line. - * - * WAKE_UPON_FIFO_FULL BEHAVIOR IN SUSPEND MODE: - * ---------------------------------------------- - * - * There are cases, however, where an application cannot afford to lose - * any events, even when the device goes into suspend mode. - * For a given rate, if a sensor has the capability to store at least 10 - * seconds worth of events in its FIFO and is able to wake up the Soc, it - * can implement an optional secondary mode: the WAKE_UPON_FIFO_FULL mode. - * - * The caller will set the SENSORS_BATCH_WAKE_UPON_FIFO_FULL flag to - * activate this mode. If the sensor does not support this mode, batch() - * will fail when the flag is set. - * - * When running with the WAKE_UPON_FIFO_FULL flag set, no events can be - * lost. 
When the FIFO is getting full, the sensor must wake up the SoC from - * suspend and return a batch before the FIFO fills-up. - * Depending on the device, it might take a few miliseconds for the SoC to - * entirely come out of suspend and start flushing the FIFO. Enough head - * room must be allocated in the FIFO to allow the device to entirely come - * out of suspend without the FIFO overflowing (no events shall be lost). - * - * Implementing the WAKE_UPON_FIFO_FULL mode is optional. - * If the hardware cannot support this mode, or if the physical - * FIFO is so small that the device would never be allowed to go into - * suspend for at least 10 seconds, then this function MUST fail when - * the flag SENSORS_BATCH_WAKE_UPON_FIFO_FULL is set, regardless of - * the value of the timeout parameter. - * - * - * DRY RUN: - * -------- - * - * If the flag SENSORS_BATCH_DRY_RUN is set, this function returns - * without modifying the batch mode or the event period and has no side - * effects, but returns errors as usual (as it would if this flag was - * not set). This flag is used to check if batch mode is available for a - * given configuration -- in particular for a given sensor at a given rate. - * - * - * Return values: - * -------------- - * - * Because sensors must be independent, the return value must not depend - * on the state of the system (whether another sensor is on or not), - * nor on whether the flag SENSORS_BATCH_DRY_RUN is set (in other words, - * if a batch call with SENSORS_BATCH_DRY_RUN is successful, - * the same call without SENSORS_BATCH_DRY_RUN must succeed as well). - * - * When timeout is not 0: - * If successful, 0 is returned. - * If the specified sensor doesn't support batch mode, return -EINVAL. - * If the specified sensor's trigger-mode is one-shot, return -EINVAL. 
- * If WAKE_UPON_FIFO_FULL is specified and the specified sensor's internal - * FIFO is too small to store at least 10 seconds worth of data at the - * given rate, -EINVAL is returned. Note that as stated above, this has to - * be determined at compile time, and not based on the state of the - * system. - * If some other constraints above cannot be satisfied, return -EINVAL. - * - * Note: the timeout parameter, when > 0, has no impact on whether this - * function succeeds or fails. - * - * When timeout is 0: - * The caller will never set the wake_upon_fifo_full flag. - * The function must succeed, and batch mode must be deactivated. - * - * Independently of whether DRY_RUN is specified, When the call to batch() - * fails, no state should be changed. In particular, a failed call to - * batch() should not change the rate of the sensor. Example: - * setDelay(..., 10ms) - * batch(..., 20ms, ...) fails - * rate should stay 10ms. - * - * - * IMPLEMENTATION NOTES: - * --------------------- - * - * Batch mode, if supported, should happen at the hardware level, - * typically using hardware FIFOs. In particular, it SHALL NOT be - * implemented in the HAL, as this would be counter productive. - * The goal here is to save significant amounts of power. - * - * In some implementations, events from several sensors can share the - * same physical FIFO. In that case, all events in the FIFO can be sent and - * processed by the HAL as soon as one batch must be reported. - * For example, if the following sensors are activated: - * - accelerometer batched with timeout = 20s - * - gyroscope batched with timeout = 5s - * then the accelerometer batches can be reported at the same time the - * gyroscope batches are reported (every 5 seconds) - * - * Batch mode can be enabled or disabled at any time, in particular - * while the specified sensor is already enabled, and this shall not - * result in the loss of events. 
- * - * COMPARATIVE IMPORTANCE OF BATCHING FOR DIFFERENT SENSORS: - * --------------------------------------------------------- - * - * On platforms on which hardware fifo size is limited, the system designers - * might have to choose how much fifo to reserve for each sensor. To help - * with this choice, here is a list of applications made possible when - * batching is implemented on the different sensors. - * - * High value: Low power pedestrian dead reckoning - * Target batching time: 20 seconds to 1 minute - * Sensors to batch: - * - Step detector - * - Rotation vector or game rotation vector at 5Hz - * Gives us step and heading while letting the SoC go to Suspend. - * - * High value: Medium power activity/gesture recognition - * Target batching time: 3 seconds - * Sensors to batch: accelerometer between 20Hz and 50Hz - * Allows recognizing arbitrary activities and gestures without having - * to keep the SoC fully awake while the data is collected. - * - * Medium-high value: Interrupt load reduction - * Target batching time: < 1 second - * Sensors to batch: any high frequency sensor. - * If the gyroscope is set at 800Hz, even batching just 10 gyro events can - * reduce the number of interrupts from 800/second to 80/second. - * - * Medium value: Continuous low frequency data collection - * Target batching time: > 1 minute - * Sensors to batch: barometer, humidity sensor, other low frequency - * sensors. - * Allows creating monitoring applications at low power. - * - * Medium value: Continuous full-sensors collection - * Target batching time: > 1 minute - * Sensors to batch: all, at high frequencies - * Allows full collection of sensor data while leaving the SoC in - * suspend mode. Only to consider if fifo space is not an issue. - * - * In each of the cases above, if WAKE_UPON_FIFO_FULL is implemented, the - * applications might decide to let the SoC go to suspend, allowing for even - * more power savings. 
+ * Enables batch mode for the given sensor and sets the delay between events. + * See the Batching sensor results page for details: + * http://source.android.com/devices/sensors/batching.html */ int (*batch)(struct sensors_poll_device_1* dev, int handle, int flags, int64_t period_ns, int64_t timeout); @@ -1359,29 +901,7 @@ typedef struct sensors_poll_device_1 { /* * Flush adds a META_DATA_FLUSH_COMPLETE event (sensors_event_meta_data_t) * to the end of the "batch mode" FIFO for the specified sensor and flushes - * the FIFO; those events are delivered as usual (i.e.: as if the batch - * timeout had expired) and removed from the FIFO. - * - * See the META_DATA_FLUSH_COMPLETE section for details about the - * META_DATA_FLUSH_COMPLETE event. - * - * The flush happens asynchronously (i.e.: this function must return - * immediately). - * - * If the implementation uses a single FIFO for several sensors, that - * FIFO is flushed and the META_DATA_FLUSH_COMPLETE event is added only - * for the specified sensor. - * - * If the specified sensor wasn't in batch mode, flush succeeds and - * promptly sends a META_DATA_FLUSH_COMPLETE event for that sensor. - * - * If the FIFO was empty at the time of the call, flush returns - * 0 (success) and promptly sends a META_DATA_FLUSH_COMPLETE event - * for that sensor. - * - * If the specified sensor wasn't enabled, flush returns -EINVAL. - * - * return 0 on success, negative errno code otherwise. + * the FIFO. */ int (*flush)(struct sensors_poll_device_1* dev, int handle); diff --git a/include/hardware/tv_input.h b/include/hardware/tv_input.h new file mode 100644 index 0000000..f2d03f1 --- /dev/null +++ b/include/hardware/tv_input.h @@ -0,0 +1,333 @@ +/* + * Copyright 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ANDROID_TV_INPUT_INTERFACE_H +#define ANDROID_TV_INPUT_INTERFACE_H + +#include <stdint.h> +#include <sys/cdefs.h> +#include <sys/types.h> + +#include <hardware/hardware.h> +#include <system/window.h> + +__BEGIN_DECLS + +/* + * Module versioning information for the TV input hardware module, based on + * tv_input_module_t.common.module_api_version. + * + * Version History: + * + * TV_INPUT_MODULE_API_VERSION_0_1: + * Initial TV input hardware module API. + * + */ + +#define TV_INPUT_MODULE_API_VERSION_0_1 HARDWARE_MODULE_API_VERSION(0, 1) + +#define TV_INPUT_DEVICE_API_VERSION_0_1 HARDWARE_DEVICE_API_VERSION(0, 1) + +/* + * The id of this module + */ +#define TV_INPUT_HARDWARE_MODULE_ID "tv_input" + +#define TV_INPUT_DEFAULT_DEVICE "default" + +/*****************************************************************************/ + +/* + * Every hardware module must have a data structure named HAL_MODULE_INFO_SYM + * and the fields of this data structure must begin with hw_module_t + * followed by module specific information. + */ +typedef struct tv_input_module { + struct hw_module_t common; +} tv_input_module_t; + +/*****************************************************************************/ + +typedef enum tv_input_type { + /* HDMI */ + TV_INPUT_TYPE_HDMI = 1, + + /* Built-in tuners. */ + TV_INPUT_TYPE_BUILT_IN_TUNER = 2, + + /* Passthrough */ + TV_INPUT_TYPE_PASSTHROUGH = 3, +} tv_input_type_t; + +typedef struct tv_input_device_info { + /* Device ID */ + int device_id; + + /* Type of physical TV input. 
 */
+    tv_input_type_t type;
+
+    /*
+     * TODO: A union of type specific information. For example, HDMI port
+     * identifier that HDMI hardware understands.
+     */
+
+    /* TODO: Add capability if necessary. */
+
+    /* TODO: Audio info */
+} tv_input_device_info_t;
+
+typedef enum {
+    /*
+     * Hardware notifies the framework that a device is available.
+     */
+    TV_INPUT_EVENT_DEVICE_AVAILABLE = 1,
+    /*
+     * Hardware notifies the framework that a device is unavailable.
+     */
+    TV_INPUT_EVENT_DEVICE_UNAVAILABLE = 2,
+    /*
+     * Stream configurations are changed. Client should regard all open streams
+     * at the specific device as closed, and should call
+     * get_stream_configurations() again, opening some of them if necessary.
+     */
+    TV_INPUT_EVENT_STREAM_CONFIGURATIONS_CHANGED = 3,
+    /*
+     * Hardware is done with capture request with the buffer. Client can assume
+     * ownership of the buffer again.
+     */
+    TV_INPUT_EVENT_CAPTURE_SUCCEEDED = 4,
+    /*
+     * Hardware met a failure while processing a capture request or client
+     * canceled the request. Client can assume ownership of the buffer again.
+     */
+    TV_INPUT_EVENT_CAPTURE_FAILED = 5,
+} tv_input_event_type_t;
+
+typedef struct tv_input_capture_result {
+    /* Device ID */
+    int device_id;
+
+    /* Stream ID */
+    int stream_id;
+
+    /* Sequence number of the request */
+    uint32_t seq;
+
+    /*
+     * The buffer passed to hardware in request_capture(). The content of
+     * buffer is undefined (although buffer itself is valid) for
+     * TV_INPUT_EVENT_CAPTURE_FAILED event.
+     */
+    buffer_handle_t buffer;
+
+    /*
+     * Error code for the request. -ECANCELED if request is cancelled; other
+     * error codes are unknown errors.
+ */ + int error_code; +} tv_input_capture_result_t; + +typedef struct tv_input_event { + tv_input_event_type_t type; + + union { + /* + * TV_INPUT_EVENT_DEVICE_AVAILABLE: all fields are relevant + * TV_INPUT_EVENT_DEVICE_UNAVAILABLE: only device_id is relevant + * TV_INPUT_EVENT_STREAM_CONFIGURATIONS_CHANGED: only device_id is + * relevant + */ + tv_input_device_info_t device_info; + /* + * TV_INPUT_EVENT_CAPTURE_SUCCEEDED: error_code is not relevant + * TV_INPUT_EVENT_CAPTURE_FAILED: all fields are relevant + */ + tv_input_capture_result_t capture_result; + }; +} tv_input_event_t; + +typedef struct tv_input_callback_ops { + /* + * event contains the type of the event and additional data if necessary. + * The event object is guaranteed to be valid only for the duration of the + * call. + * + * data is an object supplied at device initialization, opaque to the + * hardware. + */ + void (*notify)(struct tv_input_device* dev, + tv_input_event_t* event, void* data); +} tv_input_callback_ops_t; + +typedef enum { + TV_STREAM_TYPE_INDEPENDENT_VIDEO_SOURCE = 1, + TV_STREAM_TYPE_BUFFER_PRODUCER = 2, +} tv_stream_type_t; + +typedef struct tv_stream_config { + /* + * ID number of the stream. This value is used to identify the whole stream + * configuration. + */ + int stream_id; + + /* Type of the stream */ + tv_stream_type_t type; + + /* Max width/height of the stream. */ + uint32_t max_video_width; + uint32_t max_video_height; +} tv_stream_config_t; + +typedef struct buffer_producer_stream { + /* + * IN/OUT: Width / height of the stream. Client may request for specific + * size but hardware may change it. Client must allocate buffers with + * specified width and height. + */ + uint32_t width; + uint32_t height; + + /* OUT: Client must set this usage when allocating buffer. */ + uint32_t usage; + + /* OUT: Client must allocate a buffer with this format. 
*/ + uint32_t format; +} buffer_producer_stream_t; + +typedef struct tv_stream { + /* IN: ID in the stream configuration */ + int stream_id; + + /* OUT: Type of the stream (for convenience) */ + tv_stream_type_t type; + + /* Data associated with the stream for client's use */ + union { + /* OUT: A native handle describing the sideband stream source */ + native_handle_t* sideband_stream_source_handle; + + /* IN/OUT: Details are in buffer_producer_stream_t */ + buffer_producer_stream_t buffer_producer; + }; +} tv_stream_t; + +/* + * Every device data structure must begin with hw_device_t + * followed by module specific public methods and attributes. + */ +typedef struct tv_input_device { + struct hw_device_t common; + + /* + * initialize: + * + * Provide callbacks to the device and start operation. At first, no device + * is available and after initialize() completes, currently available + * devices including static devices should notify via callback. + * + * Framework owns callbacks object. + * + * data is a framework-owned object which would be sent back to the + * framework for each callback notifications. + * + * Return 0 on success. + */ + int (*initialize)(struct tv_input_device* dev, + const tv_input_callback_ops_t* callback, void* data); + + /* + * get_stream_configurations: + * + * Get stream configurations for a specific device. An input device may have + * multiple configurations. + * + * The configs object is guaranteed to be valid only until the next call to + * get_stream_configurations() or STREAM_CONFIGURATIONS_CHANGED event. + * + * Return 0 on success. + */ + int (*get_stream_configurations)(const struct tv_input_device* dev, + int device_id, int* num_configurations, + const tv_stream_config_t** configs); + + /* + * open_stream: + * + * Open a stream with given stream ID. Caller owns stream object, and the + * populated data is only valid until the stream is closed. 
+     *
+     * Return 0 on success; -EBUSY if the client should close other streams to
+     * open the stream; -EEXIST if the stream with the given ID is already open;
+     * -EINVAL if device_id and/or stream_id are invalid; other non-zero value
+     * denotes unknown error.
+     */
+    int (*open_stream)(struct tv_input_device* dev, int device_id,
+            tv_stream_t* stream);
+
+    /*
+     * close_stream:
+     *
+     * Close a stream to a device. data in tv_stream_t* object associated with
+     * the stream_id is obsolete once this call finishes.
+     *
+     * Return 0 on success; -ENOENT if the stream is not open; -EINVAL if
+     * device_id and/or stream_id are invalid.
+     */
+    int (*close_stream)(struct tv_input_device* dev, int device_id,
+            int stream_id);
+
+    /*
+     * request_capture:
+     *
+     * Request buffer capture for a stream. This is only valid for buffer
+     * producer streams. The buffer should be created with size, format and
+     * usage specified in the stream. Framework provides seq in an
+     * increasing sequence per each stream. Hardware should provide the picture
+     * in a chronological order according to seq. For example, if two
+     * requests are being processed at the same time, the request with the
+     * smaller seq should get an earlier frame.
+     *
+     * The framework releases the ownership of the buffer upon calling this
+     * function. When the buffer is filled, hardware notifies the framework
+     * via TV_INPUT_EVENT_CAPTURE_SUCCEEDED callback, and the ownership is
+     * transferred back to framework at that time.
+     *
+     * Return 0 on success; -ENOENT if the stream is not open; -EINVAL if
+     * device_id and/or stream_id are invalid; -EWOULDBLOCK if HAL cannot take
+     * additional requests until it releases a buffer.
+     */
+    int (*request_capture)(struct tv_input_device* dev, int device_id,
+            int stream_id, buffer_handle_t buffer, uint32_t seq);
+
+    /*
+     * cancel_capture:
+     *
+     * Cancel an ongoing capture. Hardware should release the buffer as soon as
+     * possible via TV_INPUT_EVENT_CAPTURE_FAILED callback.
+ * + * Return 0 on success; -ENOENT if the stream is not open; -EINVAL if + * device_id, stream_id, and/or seq are invalid. + */ + int (*cancel_capture)(struct tv_input_device* dev, int device_id, + int stream_id, uint32_t seq); + + void* reserved[16]; +} tv_input_device_t; + +__END_DECLS + +#endif // ANDROID_TV_INPUT_INTERFACE_H |