diff options
Diffstat (limited to 'security/smc')
36 files changed, 11946 insertions, 2312 deletions
diff --git a/security/smc/Kconfig b/security/smc/Kconfig index 9fcd1f6..7a933ac 100644 --- a/security/smc/Kconfig +++ b/security/smc/Kconfig @@ -1,11 +1,11 @@ -config TF_MSHIELD +config TF_ZEBRA bool config SECURITY_MIDDLEWARE_COMPONENT bool "Enable SMC Driver" depends on ARCH_OMAP3 || ARCH_OMAP4 default n - select TF_MSHIELD + select TF_ZEBRA help This option adds kernel support for communication with the SMC Protected Application. diff --git a/security/smc/Makefile b/security/smc/Makefile index 80cf430..abf0095 100644 --- a/security/smc/Makefile +++ b/security/smc/Makefile @@ -1,3 +1,48 @@ -ifeq ($(CONFIG_SECURITY_MIDDLEWARE_COMPONENT),y) -obj-$(CONFIG_ARCH_OMAP4) += omap4/ +# +# Copyright (c) 2006-2010 Trusted Logic S.A. +# All Rights Reserved. +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; either version 2 of +# the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, +# MA 02111-1307 USA +# + +ifdef S_VERSION_BUILD +EXTRA_CFLAGS += -DS_VERSION_BUILD=$(S_VERSION_BUILD) endif + +EXTRA_CFLAGS += -Iarch/arm/mach-omap2 +EXTRA_CFLAGS += -Iarch/arm/plat-omap/include/plat +EXTRA_CFLAGS += -DCONFIG_TF_TEEC +EXTRA_CFLAGS += -DCONFIG_TF_ION + +tf_driver-objs += tf_util.o +tf_driver-objs += tf_conn.o +tf_driver-objs += tf_device.o +tf_driver-objs += tf_comm.o +tf_driver-objs += tf_crypto.o +tf_driver-objs += tf_crypto_digest.o +tf_driver-objs += tf_crypto_aes.o +tf_driver-objs += tf_crypto_des.o +tf_driver-objs += tf_dma.o +tf_driver-objs += tf_comm_mshield.o +tf_driver-objs += tf_device_mshield.o +tf_driver-objs += bridge_pub2sec.o +tf_driver-objs += tf_teec.o + +obj-$(CONFIG_SECURITY_MIDDLEWARE_COMPONENT) += tf_driver.o +obj-$(CONFIG_SECURITY_MIDDLEWARE_COMPONENT) += rproc_drm.o + +plus_sec := $(call as-instr,.arch_extension sec,+sec) +AFLAGS_bridge_pub2sec.o :=-Wa,-march=armv7-a$(plus_sec) diff --git a/security/smc/bridge_pub2sec.S b/security/smc/bridge_pub2sec.S new file mode 100644 index 0000000..15cd3b7 --- /dev/null +++ b/security/smc/bridge_pub2sec.S @@ -0,0 +1,242 @@ +/** + * Copyright (c) 2011 Trusted Logic S.A. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +.text + +#define SMICODEPUB_IRQ_END 0xFE +#define SMICODEPUB_FIQ_END 0xFD +#define SMICODEPUB_RPC_END 0xFC + +#define PUB2SEC_NOCST 0xFF +#define SMICODEPUB_NEWTASK 0x00 + +/* + * RPC status: + * - 0: the secure world yielded due to an interrupt + * - 1: the secure world yielded on an RPC (no public thread is handling it) + * - 2: the secure world yielded on an RPC and the response is ready + */ +#define RPC_ADVANCEMENT_NONE 0 +#define RPC_ADVANCEMENT_PENDING 1 +#define RPC_ADVANCEMENT_FINISHED 2 + +#ifdef CONFIG_ARM_ERRATA_430973 +#define INVALIDATE_BTB MCR p15, 0, R0, c7, c5, 6 +#else +#define INVALIDATE_BTB +#endif + +schedule_secure_world: + .global schedule_secure_world + + /* Save registers */ + push {r4-r12, lr} + + /* Copy the Secure Service ID in r12 */ + mov r12, r0 + + cmp r0, #SMICODEPUB_IRQ_END + beq return_from_irq + + cmp r0, #SMICODEPUB_RPC_END + beq return_from_rpc + + mov r6, #PUB2SEC_NOCST + mov r12, #SMICODEPUB_NEWTASK + + b label_smc + +return_from_rpc: + ldr r9, =g_RPC_parameters + ldm r9, {r0-r3} + /* fall through */ + +return_from_irq: + ldr r10, =g_secure_task_id + ldr r6, [r10] + + b label_smc + +label_smc: + INVALIDATE_BTB + dsb + dmb + +#ifdef CONFIG_BENCH_SECURE_CYCLE + /* Come from Non Secure: activate counter 1 (write to 0 are ignored) */ + mov r4, #0x00000002 + + /* Read Count Enable Set Register */ + mcr p15, 0x0, r4, c9, c12, 1 + + /* Come from Non Secure: stop counter 0 (write to 0 are ignored) */ + mov r4, #0x00000001 + + /* Write Count Enable Clear Register */ + mcr p15, 0x0, r4, c9, c12, 2 +#endif + + smc #0 + b service_end + nop + +#ifdef CONFIG_BENCH_SECURE_CYCLE + /* Come from Secure: activate counter 0 (write to 0 are ignored) */ + mov r4, #0x00000001 + + /* Write Count Enable Set Register */ + mcr 
p15, 0x0, r4, c9, c12, 1 + + /* Come from Secure: stop counter 1 (write to 0 are ignored) */ + mov r4, #0x00000002 + + /* Write Count Enable Clear Register */ + mcr p15, 0x0, r4, c9, c12, 2 +#endif + + INVALIDATE_BTB + ldr r8, =g_secure_task_id + str r6, [r8] + + mov r0, #0x00 + ldr r8, =g_service_end + str r0, [r8] + + b schedule_secure_world_exit + +service_end: + +schedule_secure_world_exit: +#ifdef CONFIG_BENCH_SECURE_CYCLE + /* Come from Secure: activate counter 0 (write to 0 are ignored) */ + mov r4, #0x00000001 + + /* Write Count Enable Set Register */ + mcr p15, 0x0, r4, c9, c12, 1 + + /* Come from Secure: stop counter 1 (write to 0 are ignored) */ + mov r4, #0x00000002 + + /* Write Count Enable Clear Register */ + mcr p15, 0x0, r4, c9, c12, 2 +#endif + + INVALIDATE_BTB + + /* Restore registers */ + pop {r4-r12, pc} + +rpc_handler: + .global rpc_handler + +#ifdef CONFIG_BENCH_SECURE_CYCLE + /* Come from Secure: activate counter 0 (write to 0 are ignored) */ + mov r4, #0x00000001 + + /* Write Count Enable Set Register */ + mcr p15, 0x0, r4, c9, c12, 1 + + /* Come from Secure: stop counter 1 (write to 0 are ignored) */ + mov r4, #0x00000002 + + /* Write Count Enable Clear Register */ + mcr p15, 0x0, r4, c9, c12, 2 +#endif + INVALIDATE_BTB + + /* g_RPC_advancement = RPC_ADVANCEMENT_PENDING */ + ldr r8, =g_RPC_advancement + mov r9, #RPC_ADVANCEMENT_PENDING + str r9, [r8] + + ldr r8, =g_RPC_parameters + stm r8, {r0-r3} + + ldr r8, =g_secure_task_id + str r6, [r8] + + mov r0, #0x00 + ldr r8, =g_service_end + str r0, [r8] + + /* Restore registers */ + pop {r4-r12, pc} + +#ifdef CONFIG_BENCH_SECURE_CYCLE + +setup_counters: + .global setup_counters + + push {r14} + + mrc p15, 0, r2, c9, c12, 0 + orr r2, r2, #0x3 + mcr p15, 0, r2, c9, c12, 0 + + mrc p15, 0, r2, c9, c12, 1 + orr r2, r2, #0x80000000 + mcr p15, 0, r2, c9, c12, 1 + + pop {pc} + +run_code_speed: + .global run_code_speed + + push {r14} + + /* Reset cycle counter */ + mov r2, #0 + mcr p15, 0, r2, c9, c13, 0 
+ +run_code_speed_loop: + sub r0, r0, #1 + cmp r0, #0 + bne run_code_speed_loop + + /* Read cycle counter */ + mrc p15, 0, r0, c9, c13, 0 + + pop {pc} + +run_data_speed: + .global run_data_speed + + push {r14} + + /* Reset cycle counter */ + mov r2, #0 + mcr p15, 0, r2, c9, c13, 0 + +run_data_speed_loop: + sub r0, r0, #1 + ldr r2, [r1] + cmp r0, #0 + bne run_data_speed_loop + + /* read cycle counter */ + mrc p15, 0, r0, c9, c13, 0 + + pop {pc} + +#endif + +read_mpidr: + .global read_mpidr + mrc p15, 0, r0, c0, c0, 5 + bx lr diff --git a/security/smc/omap4/Makefile b/security/smc/omap4/Makefile deleted file mode 100644 index de75cc2..0000000 --- a/security/smc/omap4/Makefile +++ /dev/null @@ -1,35 +0,0 @@ -# -# Copyright (c) 2006-2010 Trusted Logic S.A. -# All Rights Reserved. -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License as -# published by the Free Software Foundation; either version 2 of -# the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, -# MA 02111-1307 USA -# - -ifdef S_VERSION_BUILD -EXTRA_CFLAGS += -DS_VERSION_BUILD=$(S_VERSION_BUILD) -endif - -EXTRA_CFLAGS += -Iarch/arm/mach-omap2 - -tf_driver-objs += scxlnx_util.o -tf_driver-objs += scxlnx_device.o -tf_driver-objs += scx_public_crypto.o -tf_driver-objs += scx_public_crypto_Digest.o -tf_driver-objs += scx_public_crypto_AES.o -tf_driver-objs += scx_public_dma.o -tf_driver-objs += scxlnx_comm_mshield.o - -obj-$(CONFIG_SECURITY_MIDDLEWARE_COMPONENT) += tf_driver.o diff --git a/security/smc/omap4/scx_protocol.h b/security/smc/omap4/scx_protocol.h deleted file mode 100644 index 80653eb..0000000 --- a/security/smc/omap4/scx_protocol.h +++ /dev/null @@ -1,676 +0,0 @@ -/* - * Copyright (c) 2006-2010 Trusted Logic S.A. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, - * MA 02111-1307 USA - */ - -#ifndef __SCX_PROTOCOL_H__ -#define __SCX_PROTOCOL_H__ - -/*---------------------------------------------------------------------------- - * - * This header file defines the structure used in the SChannel Protocol. - * See your Product Reference Manual for a specification of the SChannel - * protocol. 
- *---------------------------------------------------------------------------*/ - -/* - * The driver interface version returned by the version ioctl - */ -#define SCX_DRIVER_INTERFACE_VERSION 0x04000000 - -/* - * Protocol version handling - */ -#define SCX_S_PROTOCOL_MAJOR_VERSION (0x06) -#define GET_PROTOCOL_MAJOR_VERSION(a) (a >> 24) -#define GET_PROTOCOL_MINOR_VERSION(a) ((a >> 16) & 0xFF) - -/* - * The size, in bytes, of the L1 Shared Buffer. - */ -#define SCX_COMM_BUFFER_SIZE (0x1000) /* 4kB*/ - -/* - * The S flag of the nConfigFlags_S register. - */ -#define SCX_CONFIG_FLAG_S (1 << 3) - -/* - * The TimeSlot field of the nSyncSerial_N register. - */ -#define SCX_SYNC_SERIAL_TIMESLOT_N (1) - -/* - * nStatus_S related defines. - */ -#define SCX_STATUS_P_MASK (0X00000001) -#define SCX_STATUS_POWER_STATE_SHIFT (3) -#define SCX_STATUS_POWER_STATE_MASK (0x1F << SCX_STATUS_POWER_STATE_SHIFT) - -/* - * Possible power states of the POWER_STATE field of the nStatus_S register - */ -#define SCX_POWER_MODE_COLD_BOOT (0) -#define SCX_POWER_MODE_WARM_BOOT (1) -#define SCX_POWER_MODE_ACTIVE (3) -#define SCX_POWER_MODE_READY_TO_SHUTDOWN (5) -#define SCX_POWER_MODE_READY_TO_HIBERNATE (7) -#define SCX_POWER_MODE_WAKEUP (8) -#define SCX_POWER_MODE_PANIC (15) - -/* - * Possible nCommand values for MANAGEMENT commands - */ -#define SCX_MANAGEMENT_HIBERNATE (1) -#define SCX_MANAGEMENT_SHUTDOWN (2) -#define SCX_MANAGEMENT_PREPARE_FOR_CORE_OFF (3) -#define SCX_MANAGEMENT_RESUME_FROM_CORE_OFF (4) - -/* - * The capacity of the Normal Word message queue, in number of slots. - */ -#define SCX_N_MESSAGE_QUEUE_CAPACITY (512) - -/* - * The capacity of the Secure World message answer queue, in number of slots. - */ -#define SCX_S_ANSWER_QUEUE_CAPACITY (256) - -/* - * The value of the S-timeout register indicating an infinite timeout. 
- */ -#define SCX_S_TIMEOUT_0_INFINITE (0xFFFFFFFF) -#define SCX_S_TIMEOUT_1_INFINITE (0xFFFFFFFF) - -/* - * The value of the S-timeout register indicating an immediate timeout. - */ -#define SCX_S_TIMEOUT_0_IMMEDIATE (0x0) -#define SCX_S_TIMEOUT_1_IMMEDIATE (0x0) - -/* - * Identifies the get protocol version SMC. - */ -#define SCX_SMC_GET_PROTOCOL_VERSION (0XFFFFFFFB) - -/* - * Identifies the init SMC. - */ -#define SCX_SMC_INIT (0XFFFFFFFF) - -/* - * Identifies the reset irq SMC. - */ -#define SCX_SMC_RESET_IRQ (0xFFFFFFFE) - -/* - * Identifies the SET_W3B SMC. - */ -#define SCX_SMC_WAKE_UP (0xFFFFFFFD) - -/* - * Identifies the STOP SMC. - */ -#define SCX_SMC_STOP (0xFFFFFFFC) - -/* - * Identifies the n-yield SMC. - */ -#define SCX_SMC_N_YIELD (0X00000003) - - -/* Possible stop commands for SMC_STOP */ -#define SCSTOP_HIBERNATE (0xFFFFFFE1) -#define SCSTOP_SHUTDOWN (0xFFFFFFE2) - -/* - * representation of an UUID. - */ -struct SCX_UUID { - u32 time_low; - u16 time_mid; - u16 time_hi_and_version; - u8 clock_seq_and_node[8]; -}; - - -/** - * Command parameters. - */ -struct SCX_COMMAND_PARAM_VALUE { - u32 a; - u32 b; -}; - -struct SCX_COMMAND_PARAM_TEMP_MEMREF { - u32 nDescriptor; /* data pointer for exchange message.*/ - u32 nSize; - u32 nOffset; -}; - -struct SCX_COMMAND_PARAM_MEMREF { - u32 hBlock; - u32 nSize; - u32 nOffset; -}; - -union SCX_COMMAND_PARAM { - struct SCX_COMMAND_PARAM_VALUE sValue; - struct SCX_COMMAND_PARAM_TEMP_MEMREF sTempMemref; - struct SCX_COMMAND_PARAM_MEMREF sMemref; -}; - -/** - * Answer parameters. 
- */ -struct SCX_ANSWER_PARAM_VALUE { - u32 a; - u32 b; -}; - -struct SCX_ANSWER_PARAM_SIZE { - u32 _ignored; - u32 nSize; -}; - -union SCX_ANSWER_PARAM { - struct SCX_ANSWER_PARAM_SIZE sSize; - struct SCX_ANSWER_PARAM_VALUE sValue; -}; - -/* - * Descriptor tables capacity - */ -#define SCX_MAX_W3B_COARSE_PAGES (2) -#define SCX_MAX_COARSE_PAGES (8) -#define SCX_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT (8) -#define SCX_DESCRIPTOR_TABLE_CAPACITY \ - (1 << SCX_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT) -#define SCX_DESCRIPTOR_TABLE_CAPACITY_MASK \ - (SCX_DESCRIPTOR_TABLE_CAPACITY - 1) -/* Shared memories coarse pages can map up to 1MB */ -#define SCX_MAX_COARSE_PAGE_MAPPED_SIZE \ - (PAGE_SIZE * SCX_DESCRIPTOR_TABLE_CAPACITY) -/* Shared memories cannot exceed 8MB */ -#define SCX_MAX_SHMEM_SIZE \ - (SCX_MAX_COARSE_PAGE_MAPPED_SIZE << 3) - -/* - * Buffer size for version description fields - */ -#define SCX_DESCRIPTION_BUFFER_LENGTH 64 - -/* - * Shared memory type flags. - */ -#define SCX_SHMEM_TYPE_READ (0x00000001) -#define SCX_SHMEM_TYPE_WRITE (0x00000002) - -/* - * Shared mem flags - */ -#define SCX_SHARED_MEM_FLAG_INPUT 1 -#define SCX_SHARED_MEM_FLAG_OUTPUT 2 -#define SCX_SHARED_MEM_FLAG_INOUT 3 - - -/* - * Parameter types - */ -#define SCX_PARAM_TYPE_NONE 0x0 -#define SCX_PARAM_TYPE_VALUE_INPUT 0x1 -#define SCX_PARAM_TYPE_VALUE_OUTPUT 0x2 -#define SCX_PARAM_TYPE_VALUE_INOUT 0x3 -#define SCX_PARAM_TYPE_MEMREF_TEMP_INPUT 0x5 -#define SCX_PARAM_TYPE_MEMREF_TEMP_OUTPUT 0x6 -#define SCX_PARAM_TYPE_MEMREF_TEMP_INOUT 0x7 -#define SCX_PARAM_TYPE_MEMREF_INPUT 0xD -#define SCX_PARAM_TYPE_MEMREF_OUTPUT 0xE -#define SCX_PARAM_TYPE_MEMREF_INOUT 0xF - -#define SCX_PARAM_TYPE_MEMREF_FLAG 0x4 -#define SCX_PARAM_TYPE_REGISTERED_MEMREF_FLAG 0x8 - - -#define SCX_MAKE_PARAM_TYPES(t0, t1, t2, t3) \ - ((t0) | ((t1) << 4) | ((t2) << 8) | ((t3) << 12)) -#define SCX_GET_PARAM_TYPE(t, i) (((t) >> (4 * i)) & 0xF) - -/* - * Login types. 
- */ -#define SCX_LOGIN_PUBLIC 0x00000000 -#define SCX_LOGIN_USER 0x00000001 -#define SCX_LOGIN_GROUP 0x00000002 -#define SCX_LOGIN_APPLICATION 0x00000004 -#define SCX_LOGIN_APPLICATION_USER 0x00000005 -#define SCX_LOGIN_APPLICATION_GROUP 0x00000006 -#define SCX_LOGIN_AUTHENTICATION 0x80000000 -#define SCX_LOGIN_PRIVILEGED 0x80000002 - -/* Login variants */ - -#define SCX_LOGIN_VARIANT(mainType, os, variant) \ - ((mainType) | (1 << 27) | ((os) << 16) | ((variant) << 8)) - -#define SCX_LOGIN_GET_MAIN_TYPE(type) \ - ((type) & ~SCX_LOGIN_VARIANT(0, 0xFF, 0xFF)) - -#define SCX_LOGIN_OS_ANY 0x00 -#define SCX_LOGIN_OS_LINUX 0x01 -#define SCX_LOGIN_OS_ANDROID 0x04 - -/* OS-independent variants */ -#define SCX_LOGIN_USER_NONE \ - SCX_LOGIN_VARIANT(SCX_LOGIN_USER, SCX_LOGIN_OS_ANY, 0xFF) -#define SCX_LOGIN_GROUP_NONE \ - SCX_LOGIN_VARIANT(SCX_LOGIN_GROUP, SCX_LOGIN_OS_ANY, 0xFF) -#define SCX_LOGIN_APPLICATION_USER_NONE \ - SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION_USER, SCX_LOGIN_OS_ANY, 0xFF) -#define SCX_LOGIN_AUTHENTICATION_BINARY_SHA1_HASH \ - SCX_LOGIN_VARIANT(SCX_LOGIN_AUTHENTICATION, SCX_LOGIN_OS_ANY, 0x01) -#define SCX_LOGIN_PRIVILEGED_KERNEL \ - SCX_LOGIN_VARIANT(SCX_LOGIN_PRIVILEGED, SCX_LOGIN_OS_ANY, 0x01) - -/* Linux variants */ -#define SCX_LOGIN_USER_LINUX_EUID \ - SCX_LOGIN_VARIANT(SCX_LOGIN_USER, SCX_LOGIN_OS_LINUX, 0x01) -#define SCX_LOGIN_GROUP_LINUX_GID \ - SCX_LOGIN_VARIANT(SCX_LOGIN_GROUP, SCX_LOGIN_OS_LINUX, 0x01) -#define SCX_LOGIN_APPLICATION_LINUX_PATH_SHA1_HASH \ - SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION, SCX_LOGIN_OS_LINUX, 0x01) -#define SCX_LOGIN_APPLICATION_USER_LINUX_PATH_EUID_SHA1_HASH \ - SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION_USER, SCX_LOGIN_OS_LINUX, 0x01) -#define SCX_LOGIN_APPLICATION_GROUP_LINUX_PATH_GID_SHA1_HASH \ - SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION_GROUP, SCX_LOGIN_OS_LINUX, 0x01) - -/* Android variants */ -#define SCX_LOGIN_USER_ANDROID_EUID \ - SCX_LOGIN_VARIANT(SCX_LOGIN_USER, SCX_LOGIN_OS_ANDROID, 0x01) -#define 
SCX_LOGIN_GROUP_ANDROID_GID \ - SCX_LOGIN_VARIANT(SCX_LOGIN_GROUP, SCX_LOGIN_OS_ANDROID, 0x01) -#define SCX_LOGIN_APPLICATION_ANDROID_UID \ - SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION, SCX_LOGIN_OS_ANDROID, 0x01) -#define SCX_LOGIN_APPLICATION_USER_ANDROID_UID_EUID \ - SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION_USER, SCX_LOGIN_OS_ANDROID, \ - 0x01) -#define SCX_LOGIN_APPLICATION_GROUP_ANDROID_UID_GID \ - SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION_GROUP, SCX_LOGIN_OS_ANDROID, \ - 0x01) - -/* - * return origins - */ -#define SCX_ORIGIN_COMMS 2 -#define SCX_ORIGIN_TEE 3 -#define SCX_ORIGIN_TRUSTED_APP 4 -/* - * The SCX message types. - */ -#define SCX_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT 0x02 -#define SCX_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT 0xFD -#define SCX_MESSAGE_TYPE_REGISTER_SHARED_MEMORY 0xF7 -#define SCX_MESSAGE_TYPE_RELEASE_SHARED_MEMORY 0xF9 -#define SCX_MESSAGE_TYPE_OPEN_CLIENT_SESSION 0xF0 -#define SCX_MESSAGE_TYPE_CLOSE_CLIENT_SESSION 0xF2 -#define SCX_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND 0xF5 -#define SCX_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND 0xF4 -#define SCX_MESSAGE_TYPE_MANAGEMENT 0xFE - - -/* - * The error codes - */ -#define S_SUCCESS 0x00000000 -#define S_ERROR_NO_DATA 0xFFFF000B -#define S_ERROR_OUT_OF_MEMORY 0xFFFF000C - - -struct SCX_COMMAND_HEADER { - u8 nMessageSize; - u8 nMessageType; - u16 nMessageInfo; - u32 nOperationID; -}; - -struct SCX_ANSWER_HEADER { - u8 nMessageSize; - u8 nMessageType; - u16 nMessageInfo; - u32 nOperationID; - u32 nErrorCode; -}; - -/* - * CREATE_DEVICE_CONTEXT command message. - */ -struct SCX_COMMAND_CREATE_DEVICE_CONTEXT { - u8 nMessageSize; - u8 nMessageType; - u16 nMessageInfo_RFU; - u32 nOperationID; - u32 nDeviceContextID; -}; - -/* - * CREATE_DEVICE_CONTEXT answer message. 
- */ -struct SCX_ANSWER_CREATE_DEVICE_CONTEXT { - u8 nMessageSize; - u8 nMessageType; - u16 nMessageInfo_RFU; - /* an opaque Normal World identifier for the operation */ - u32 nOperationID; - u32 nErrorCode; - /* an opaque Normal World identifier for the device context */ - u32 hDeviceContext; -}; - -/* - * DESTROY_DEVICE_CONTEXT command message. - */ -struct SCX_COMMAND_DESTROY_DEVICE_CONTEXT { - u8 nMessageSize; - u8 nMessageType; - u16 nMessageInfo_RFU; - u32 nOperationID; - u32 hDeviceContext; -}; - -/* - * DESTROY_DEVICE_CONTEXT answer message. - */ -struct SCX_ANSWER_DESTROY_DEVICE_CONTEXT { - u8 nMessageSize; - u8 nMessageType; - u16 nMessageInfo_RFU; - /* an opaque Normal World identifier for the operation */ - u32 nOperationID; - u32 nErrorCode; - u32 nDeviceContextID; -}; - -/* - * OPEN_CLIENT_SESSION command message. - */ -struct SCX_COMMAND_OPEN_CLIENT_SESSION { - u8 nMessageSize; - u8 nMessageType; - u16 nParamTypes; - /* an opaque Normal World identifier for the operation */ - u32 nOperationID; - u32 hDeviceContext; - u32 nCancellationID; - u64 sTimeout; - struct SCX_UUID sDestinationUUID; - union SCX_COMMAND_PARAM sParams[4]; - u32 nLoginType; - /* - * Size = 0 for public, [16] for group identification, [20] for - * authentication - */ - u8 sLoginData[20]; -}; - -/* - * OPEN_CLIENT_SESSION answer message. - */ -struct SCX_ANSWER_OPEN_CLIENT_SESSION { - u8 nMessageSize; - u8 nMessageType; - u8 nReturnOrigin; - u8 __nReserved; - /* an opaque Normal World identifier for the operation */ - u32 nOperationID; - u32 nErrorCode; - u32 hClientSession; - union SCX_ANSWER_PARAM sAnswers[4]; -}; - -/* - * CLOSE_CLIENT_SESSION command message. - */ -struct SCX_COMMAND_CLOSE_CLIENT_SESSION { - u8 nMessageSize; - u8 nMessageType; - u16 nMessageInfo_RFU; - /* an opaque Normal World identifier for the operation */ - u32 nOperationID; - u32 hDeviceContext; - u32 hClientSession; -}; - -/* - * CLOSE_CLIENT_SESSION answer message. 
- */ -struct SCX_ANSWER_CLOSE_CLIENT_SESSION { - u8 nMessageSize; - u8 nMessageType; - u16 nMessageInfo_RFU; - /* an opaque Normal World identifier for the operation */ - u32 nOperationID; - u32 nErrorCode; -}; - - -/* - * REGISTER_SHARED_MEMORY command message - */ -struct SCX_COMMAND_REGISTER_SHARED_MEMORY { - u8 nMessageSize; - u8 nMessageType; - u16 nMemoryFlags; - u32 nOperationID; - u32 hDeviceContext; - u32 nBlockID; - u32 nSharedMemSize; - u32 nSharedMemStartOffset; - u32 nSharedMemDescriptors[SCX_MAX_COARSE_PAGES]; -}; - -/* - * REGISTER_SHARED_MEMORY answer message. - */ -struct SCX_ANSWER_REGISTER_SHARED_MEMORY { - u8 nMessageSize; - u8 nMessageType; - u16 nMessageInfo_RFU; - /* an opaque Normal World identifier for the operation */ - u32 nOperationID; - u32 nErrorCode; - u32 hBlock; -}; - -/* - * RELEASE_SHARED_MEMORY command message. - */ -struct SCX_COMMAND_RELEASE_SHARED_MEMORY { - u8 nMessageSize; - u8 nMessageType; - u16 nMessageInfo_RFU; - /* an opaque Normal World identifier for the operation */ - u32 nOperationID; - u32 hDeviceContext; - u32 hBlock; -}; - -/* - * RELEASE_SHARED_MEMORY answer message. - */ -struct SCX_ANSWER_RELEASE_SHARED_MEMORY { - u8 nMessageSize; - u8 nMessageType; - u16 nMessageInfo_RFU; - u32 nOperationID; - u32 nErrorCode; - u32 nBlockID; -}; - -/* - * INVOKE_CLIENT_COMMAND command message. - */ -struct SCX_COMMAND_INVOKE_CLIENT_COMMAND { - u8 nMessageSize; - u8 nMessageType; - u16 nParamTypes; - u32 nOperationID; - u32 hDeviceContext; - u32 hClientSession; - u64 sTimeout; - u32 nCancellationID; - u32 nClientCommandIdentifier; - union SCX_COMMAND_PARAM sParams[4]; -}; - -/* - * INVOKE_CLIENT_COMMAND command answer. - */ -struct SCX_ANSWER_INVOKE_CLIENT_COMMAND { - u8 nMessageSize; - u8 nMessageType; - u8 nReturnOrigin; - u8 __nReserved; - u32 nOperationID; - u32 nErrorCode; - union SCX_ANSWER_PARAM sAnswers[4]; -}; - -/* - * CANCEL_CLIENT_OPERATION command message. 
- */ -struct SCX_COMMAND_CANCEL_CLIENT_OPERATION { - u8 nMessageSize; - u8 nMessageType; - u16 nMessageInfo_RFU; - /* an opaque Normal World identifier for the operation */ - u32 nOperationID; - u32 hDeviceContext; - u32 hClientSession; - u32 nCancellationID; -}; - -struct SCX_ANSWER_CANCEL_CLIENT_OPERATION { - u8 nMessageSize; - u8 nMessageType; - u16 nMessageInfo_RFU; - u32 nOperationID; - u32 nErrorCode; -}; - -/* - * MANAGEMENT command message. - */ -struct SCX_COMMAND_MANAGEMENT { - u8 nMessageSize; - u8 nMessageType; - u16 nCommand; - u32 nOperationID; - u32 nW3BSize; - u32 nW3BStartOffset; - u32 nSharedMemDescriptors[1]; -}; - -/* - * POWER_MANAGEMENT answer message. - * The message does not provide message specific parameters. - * Therefore no need to define a specific answer structure - */ - -/* - * Structure for L2 messages - */ -union SCX_COMMAND_MESSAGE { - struct SCX_COMMAND_HEADER sHeader; - struct SCX_COMMAND_CREATE_DEVICE_CONTEXT sCreateDeviceContextMessage; - struct SCX_COMMAND_DESTROY_DEVICE_CONTEXT sDestroyDeviceContextMessage; - struct SCX_COMMAND_OPEN_CLIENT_SESSION sOpenClientSessionMessage; - struct SCX_COMMAND_CLOSE_CLIENT_SESSION sCloseClientSessionMessage; - struct SCX_COMMAND_REGISTER_SHARED_MEMORY sRegisterSharedMemoryMessage; - struct SCX_COMMAND_RELEASE_SHARED_MEMORY sReleaseSharedMemoryMessage; - struct SCX_COMMAND_INVOKE_CLIENT_COMMAND sInvokeClientCommandMessage; - struct SCX_COMMAND_CANCEL_CLIENT_OPERATION - sCancelClientOperationMessage; - struct SCX_COMMAND_MANAGEMENT sManagementMessage; -}; - -/* - * Structure for any L2 answer - */ - -union SCX_ANSWER_MESSAGE { - struct SCX_ANSWER_HEADER sHeader; - struct SCX_ANSWER_CREATE_DEVICE_CONTEXT sCreateDeviceContextAnswer; - struct SCX_ANSWER_OPEN_CLIENT_SESSION sOpenClientSessionAnswer; - struct SCX_ANSWER_CLOSE_CLIENT_SESSION sCloseClientSessionAnswer; - struct SCX_ANSWER_REGISTER_SHARED_MEMORY sRegisterSharedMemoryAnswer; - struct SCX_ANSWER_RELEASE_SHARED_MEMORY 
sReleaseSharedMemoryAnswer; - struct SCX_ANSWER_INVOKE_CLIENT_COMMAND sInvokeClientCommandAnswer; - struct SCX_ANSWER_DESTROY_DEVICE_CONTEXT sDestroyDeviceContextAnswer; - struct SCX_ANSWER_CANCEL_CLIENT_OPERATION sCancelClientOperationAnswer; -}; - -/* Structure of the Communication Buffer */ -struct SCHANNEL_C1S_BUFFER { - u32 nConfigFlags_S; - u32 nW3BSizeMax_S; - u32 nReserved0; - u32 nW3BSizeCurrent_S; - u8 sReserved1[48]; - u8 sVersionDescription[SCX_DESCRIPTION_BUFFER_LENGTH]; - u32 nStatus_S; - u32 sReserved2; - u32 nSyncSerial_N; - u32 nSyncSerial_S; - u64 sTime_N[2]; - u64 sTimeout_S[2]; - u32 nFirstCommand; - u32 nFirstFreeCommand; - u32 nFirstAnswer; - u32 nFirstFreeAnswer; - u32 nW3BDescriptors[128]; - #ifdef CONFIG_TF_MSHIELD - u8 sRPCTraceBuffer[140]; - u8 sRPCShortcutBuffer[180]; - #else - u8 sReserved3[320]; - #endif - u32 sCommandQueue[SCX_N_MESSAGE_QUEUE_CAPACITY]; - u32 sAnswerQueue[SCX_S_ANSWER_QUEUE_CAPACITY]; -}; - - -/* - * SCX_VERSION_INFORMATION_BUFFER structure description - * Description of the sVersionBuffer handed over from user space to kernel space - * This field is filled by the driver during a CREATE_DEVICE_CONTEXT ioctl - * and handed back to user space - */ -struct SCX_VERSION_INFORMATION_BUFFER { - u8 sDriverDescription[65]; - u8 sSecureWorldDescription[65]; -}; - - -/* The IOCTLs the driver supports */ -#include <linux/ioctl.h> - -#define IOCTL_SCX_GET_VERSION _IO('z', 0) -#define IOCTL_SCX_EXCHANGE _IOWR('z', 1, union SCX_COMMAND_MESSAGE) -#define IOCTL_SCX_GET_DESCRIPTION _IOR('z', 2, \ - struct SCX_VERSION_INFORMATION_BUFFER) - -#endif /* !defined(__SCX_PROTOCOL_H__) */ diff --git a/security/smc/omap4/scx_public_crypto.c b/security/smc/omap4/scx_public_crypto.c deleted file mode 100644 index d6b751c..0000000 --- a/security/smc/omap4/scx_public_crypto.c +++ /dev/null @@ -1,355 +0,0 @@ -/* - * Copyright (c) 2006-2010 Trusted Logic S.A. - * All Rights Reserved. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include "scxlnx_defs.h" -#include "scxlnx_util.h" -#include "scxlnx_mshield.h" -#include "scx_public_crypto.h" -#include "scx_public_dma.h" - -#define IO_ADDRESS OMAP2_L4_IO_ADDRESS - -#define S_SUCCESS 0x00000000 -#define S_ERROR_GENERIC 0xFFFF0000 -#define S_ERROR_ACCESS_DENIED 0xFFFF0001 -#define S_ERROR_BAD_FORMAT 0xFFFF0005 -#define S_ERROR_BAD_PARAMETERS 0xFFFF0006 -#define S_ERROR_OUT_OF_MEMORY 0xFFFF000C -#define S_ERROR_SHORT_BUFFER 0xFFFF0010 -#define S_ERROR_UNREACHABLE 0xFFFF3013 -#define S_ERROR_SERVICE 0xFFFF1000 - -#define CKR_OK 0x00000000 - -#define PUBLIC_CRYPTO_TIMEOUT_CONST 0x000FFFFF - -#define RPC_AES1_CODE PUBLIC_CRYPTO_HWA_AES1 -#define RPC_AES2_CODE PUBLIC_CRYPTO_HWA_AES2 -#define RPC_DES_CODE PUBLIC_CRYPTO_HWA_DES -#define RPC_SHA_CODE PUBLIC_CRYPTO_HWA_SHA - -#define RPC_CRYPTO_COMMAND_MASK 0x000003c0 - -#define RPC_INSTALL_SHORTCUT_LOCK_ACCELERATOR 0x200 -#define RPC_INSTALL_SHORTCUT_LOCK_ACCELERATOR_UNLOCK 0x000 -#define RPC_INSTALL_SHORTCUT_LOCK_ACCELERATOR_LOCK 0x001 - -#define RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT 0x240 -#define RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_LOCK_AES1 RPC_AES1_CODE -#define RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_LOCK_AES2 RPC_AES2_CODE -#define RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_LOCK_DES RPC_DES_CODE -#define 
RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_LOCK_SHA RPC_SHA_CODE -#define RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_SUSPEND 0x010 -#define RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_UNINSTALL 0x020 - -#define RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS 0x280 -#define RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_UNLOCK_AES1 RPC_AES1_CODE -#define RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_UNLOCK_AES2 RPC_AES2_CODE -#define RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_UNLOCK_DES RPC_DES_CODE -#define RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_UNLOCK_SHA RPC_SHA_CODE -#define RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_RESUME 0x010 - -#define RPC_CLEAR_GLOBAL_KEY_CONTEXT 0x2c0 -#define RPC_CLEAR_GLOBAL_KEY_CONTEXT_CLEARED_AES 0x001 -#define RPC_CLEAR_GLOBAL_KEY_CONTEXT_CLEARED_DES 0x002 - -#define ENABLE_CLOCK true -#define DISABLE_CLOCK false - -/*---------------------------------------------------------------------------*/ -/*RPC IN/OUT structures for CUS implementation */ -/*---------------------------------------------------------------------------*/ - -struct RPC_INSTALL_SHORTCUT_LOCK_ACCELERATOR_OUT { - u32 nShortcutID; - u32 nError; -}; - -struct RPC_INSTALL_SHORTCUT_LOCK_ACCELERATOR_IN { - u32 nDeviceContextID; - u32 hClientSession; - u32 nCommandID; - u32 hKeyContext; - /** - *The identifier of the HWA accelerator that this shortcut uses! - *Possible values are: - *- 1 (RPC_AES1_CODE) - *- 2 (RPC_AES2_CODE) - *- 4 (RPC_DES_CODE) - *- 8 (RPC_SHA_CODE) - **/ - u32 nHWAID; - /** - *This field defines the algorithm, direction, mode, key size. - *It contains some of the bits of the corresponding "CTRL" register - *of the accelerator. - * - *More precisely: - *For AES1 accelerator, nHWA_CTRL contains the following bits: - *- CTR (bit 6): - * when 1, selects CTR mode. 
- * when 0, selects CBC or ECB mode (according to CBC bit) - *- CBC (bit 5) - * when 1, selects CBC mode (but only if CTR=0) - * when 0, selects EBC mode (but only if CTR=0) - *- DIRECTION (bit 2) - * 0: decryption - * 1: encryption - * - *For the DES2 accelerator, nHWA_CTRL contains the following bits: - *- CBC (bit 4): 1 for CBC, 0 for ECB - *- DIRECTION (bit 2): 0 for decryption, 1 for encryption - * - *For the SHA accelerator, nHWA_CTRL contains the following bits: - *- ALGO (bit 2:1): - * 0x0: MD5 - * 0x1: SHA1 - * 0x2: SHA-224 - * 0x3: SHA-256 - **/ - u32 nHWA_CTRL; - union PUBLIC_CRYPTO_OPERATION_STATE sOperationState; -}; - -struct RPC_LOCK_HWA_SUSPEND_SHORTCUT_OUT { - union PUBLIC_CRYPTO_OPERATION_STATE sOperationState; -}; - -struct RPC_LOCK_HWA_SUSPEND_SHORTCUT_IN { - u32 nShortcutID; -}; - -struct RPC_RESUME_SHORTCUT_UNLOCK_HWA_IN { - u32 nShortcutID; - u32 hAES1KeyContext; - u32 hAES2KeyContext; - u32 hDESKeyContext; - union PUBLIC_CRYPTO_OPERATION_STATE sOperationState; -}; - -/*------------------------------------------------------------------------- */ -/* - * HWA public lock or unlock one HWA according algo specified by nHWAID - */ -void PDrvCryptoLockUnlockHWA(u32 nHWAID, bool bDoLock) -{ - int is_sem = 0; - struct semaphore *s = NULL; - struct mutex *m = NULL; - struct SCXLNX_DEVICE *dev = SCXLNXGetDevice(); - - dprintk(KERN_INFO "PDrvCryptoLockUnlockHWA:nHWAID=0x%04X bDoLock=%d\n", - nHWAID, bDoLock); - - switch (nHWAID) { - case RPC_AES1_CODE: - s = &dev->sAES1CriticalSection; - is_sem = 1; - break; - case RPC_AES2_CODE: - s = &dev->sAES2CriticalSection; - is_sem = 1; - break; - case RPC_DES_CODE: - m = &dev->sDESCriticalSection; - break; - default: - case RPC_SHA_CODE: - m = &dev->sSHACriticalSection; - break; - } - - if (bDoLock == LOCK_HWA) { - dprintk(KERN_INFO "PDrvCryptoLockUnlockHWA: " - "Wait for HWAID=0x%04X\n", nHWAID); - if (is_sem) { - while (down_trylock(s)) - cpu_relax(); - } else { - while (!mutex_trylock(m)) - cpu_relax(); - } - 
dprintk(KERN_INFO "PDrvCryptoLockUnlockHWA: " - "Locked on HWAID=0x%04X\n", nHWAID); - } else { - if (is_sem) - up(s); - else - mutex_unlock(m); - dprintk(KERN_INFO "PDrvCryptoLockUnlockHWA: " - "Released for HWAID=0x%04X\n", nHWAID); - } -} - -/*------------------------------------------------------------------------- */ -/** - *Initialize the public crypto DMA channels, global HWA semaphores and handles - */ -u32 SCXPublicCryptoInit(void) -{ - struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice(); - u32 nError = PUBLIC_CRYPTO_OPERATION_SUCCESS; - - /* Initialize HWAs */ - PDrvCryptoAESInit(); - PDrvCryptoDigestInit(); - - /*initialize the HWA semaphores */ - sema_init(&pDevice->sAES1CriticalSection, 1); - sema_init(&pDevice->sAES2CriticalSection, 1); - mutex_init(&pDevice->sSHACriticalSection); - - /*initialize the current key handle loaded in the AESn/DES HWA */ - pDevice->hAES1SecureKeyContext = 0; - pDevice->hAES2SecureKeyContext = 0; - pDevice->bSHAM1IsPublic = false; - - /*initialize the DMA semaphores */ - mutex_init(&pDevice->sm.sDMALock); - - /*allocate DMA buffer */ - pDevice->nDMABufferLength = PAGE_SIZE * 16; - pDevice->pDMABuffer = dma_alloc_coherent(NULL, - pDevice->nDMABufferLength, - &(pDevice->pDMABufferPhys), - GFP_KERNEL); - if (pDevice->pDMABuffer == NULL) { - printk(KERN_ERR - "SCXPublicCryptoInit: Out of memory for DMA buffer\n"); - nError = S_ERROR_OUT_OF_MEMORY; - } - - return nError; -} - -/*------------------------------------------------------------------------- */ -/* - *Initialize the device context CUS fields (shortcut semaphore and public CUS - *list) - */ -void SCXPublicCryptoInitDeviceContext(struct SCXLNX_CONNECTION *pDeviceContext) -{ - /*initialize the CUS list in the given device context */ - spin_lock_init(&(pDeviceContext->shortcutListCriticalSectionLock)); - INIT_LIST_HEAD(&(pDeviceContext->ShortcutList)); -} - -/*------------------------------------------------------------------------- */ -/** - *Terminate the public crypto 
(including DMA) - */ -void SCXPublicCryptoTerminate() -{ - struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice(); - - if (pDevice->pDMABuffer != NULL) { - dma_free_coherent(NULL, pDevice->nDMABufferLength, - pDevice->pDMABuffer, - pDevice->pDMABufferPhys); - pDevice->pDMABuffer = NULL; - } - - PDrvCryptoDigestExit(); - PDrvCryptoAESExit(); -} - -/*------------------------------------------------------------------------- */ - -void SCXPublicCryptoWaitForReadyBitInfinitely(u32 *pRegister, u32 vBit) -{ - while (!(INREG32(pRegister) & vBit)) - ; -} - -/*------------------------------------------------------------------------- */ - -u32 SCXPublicCryptoWaitForReadyBit(u32 *pRegister, u32 vBit) -{ - u32 timeoutCounter = PUBLIC_CRYPTO_TIMEOUT_CONST; - - while ((!(INREG32(pRegister) & vBit)) && ((--timeoutCounter) != 0)) - ; - - if (timeoutCounter == 0) - return PUBLIC_CRYPTO_ERR_TIMEOUT; - - return PUBLIC_CRYPTO_OPERATION_SUCCESS; -} - -/*------------------------------------------------------------------------- */ - -static DEFINE_SPINLOCK(clk_lock); - -void SCXPublicCryptoDisableClock(uint32_t vClockPhysAddr) -{ - u32 *pClockReg; - u32 val; - unsigned long flags; - - dprintk(KERN_INFO "SCXPublicCryptoDisableClock: " \ - "vClockPhysAddr=0x%08X\n", - vClockPhysAddr); - - /* Ensure none concurrent access when changing clock registers */ - spin_lock_irqsave(&clk_lock, flags); - - pClockReg = (u32 *)IO_ADDRESS(vClockPhysAddr); - - val = __raw_readl(pClockReg); - val &= ~(0x3); - __raw_writel(val, pClockReg); - - /* Wait for clock to be fully disabled */ - while ((__raw_readl(pClockReg) & 0x30000) == 0) - ; - - spin_unlock_irqrestore(&clk_lock, flags); - - tf_l4sec_clkdm_allow_idle(false, true); -} - -/*------------------------------------------------------------------------- */ - -void SCXPublicCryptoEnableClock(uint32_t vClockPhysAddr) -{ - u32 *pClockReg; - u32 val; - unsigned long flags; - - dprintk(KERN_INFO "SCXPublicCryptoEnableClock: " \ - "vClockPhysAddr=0x%08X\n", - 
vClockPhysAddr); - - tf_l4sec_clkdm_wakeup(false, true); - - /* Ensure none concurrent access when changing clock registers */ - spin_lock_irqsave(&clk_lock, flags); - - pClockReg = (u32 *)IO_ADDRESS(vClockPhysAddr); - - val = __raw_readl(pClockReg); - val |= 0x2; - __raw_writel(val, pClockReg); - - /* Wait for clock to be fully enabled */ - while ((__raw_readl(pClockReg) & 0x30000) != 0) - ; - - spin_unlock_irqrestore(&clk_lock, flags); -} - diff --git a/security/smc/omap4/scx_public_dma.c b/security/smc/omap4/scx_public_dma.c deleted file mode 100644 index 743c333..0000000 --- a/security/smc/omap4/scx_public_dma.c +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright (c) 2006-2010 Trusted Logic S.A. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include "scxlnx_defs.h" -#include "scxlnx_util.h" -#include "scx_public_dma.h" - -#include <asm/atomic.h> - -static atomic_t g_dmaEventFlag = ATOMIC_INIT(0); - -/*------------------------------------------------------------------------ */ -/* - * Internal functions - */ - -static void scxPublicDMACallback(int lch, u16 ch_status, void *data) -{ - atomic_inc(&g_dmaEventFlag); -} - -/*------------------------------------------------------------------------ */ -/* - * Public DMA API - */ - -u32 scxPublicDMARequest(int *lch) -{ - int dma_ch_out = 0; - - if (lch == NULL) - return PUBLIC_CRYPTO_ERR_BAD_PARAMETERS; - - if (omap_request_dma(0, "SMC Public Crypto", - scxPublicDMACallback, NULL, &dma_ch_out) != 0) - return PUBLIC_CRYPTO_ERR_OUT_OF_MEMORY; - - omap_disable_dma_irq(dma_ch_out, OMAP_DMA_DROP_IRQ | - OMAP_DMA_BLOCK_IRQ); - - *lch = dma_ch_out; - - return PUBLIC_CRYPTO_OPERATION_SUCCESS; -} - -/*------------------------------------------------------------------------ */ -/* - * Release a DMA channel - */ -u32 scxPublicDMARelease(int lch) -{ - omap_free_dma(lch); - - return PUBLIC_CRYPTO_OPERATION_SUCCESS; -} - -/*------------------------------------------------------------------------ */ - -void scxPublicDMASetParams(int lch, struct omap_dma_channel_params *pParams) -{ - omap_set_dma_params(lch, pParams); -} - -/*------------------------------------------------------------------------ */ - -void scxPublicDMAStart(int lch, int interruptMask) -{ - atomic_set(&g_dmaEventFlag, 0); - omap_enable_dma_irq(lch, interruptMask); - omap_start_dma(lch); -} - -/*------------------------------------------------------------------------ */ - -void scxPublicDMADisableChannel(int lch) -{ - omap_stop_dma(lch); -} - 
-/*------------------------------------------------------------------------ */ - -void scxPublicDMAClearChannel(int lch) -{ - omap_clear_dma(lch); -} - -/*------------------------------------------------------------------------ */ - -void scxPublicDMAWait(int nr_of_cb) -{ - while (atomic_read(&g_dmaEventFlag) < nr_of_cb) - cpu_relax(); -} - -/*------------------------------------------------------------------------ */ -/* - * Perform common DMA channel setup, used to factorize the code - * - * Output: struct omap_dma_channel_params *pDMAChannel - * Inputs: u32 nbBlocks Number of block of the transfer - * u32 nbElements Number of elements of the transfer - * u32 nDstStart Destination address - * u32 nSrcStart Source address - * u32 nTriggerID Trigger ID - */ -void scxPublicSetDMAChannelCommonParams( - struct omap_dma_channel_params *pDMAChannel, - u32 nbBlocks, u32 nbElements, - u32 nDstStart, u32 nSrcStart, u32 nTriggerID) -{ - pDMAChannel->data_type = OMAP_DMA_DATA_TYPE_S32; - pDMAChannel->elem_count = nbElements; - pDMAChannel->frame_count = nbBlocks; - pDMAChannel->src_ei = 0; - pDMAChannel->src_fi = 0; - pDMAChannel->dst_ei = 0; - pDMAChannel->dst_fi = 0; - pDMAChannel->sync_mode = OMAP_DMA_SYNC_FRAME; - pDMAChannel->src_start = nSrcStart; - pDMAChannel->dst_start = nDstStart; - pDMAChannel->trigger = nTriggerID; -} diff --git a/security/smc/omap4/scx_public_dma.h b/security/smc/omap4/scx_public_dma.h deleted file mode 100644 index ddd19b2..0000000 --- a/security/smc/omap4/scx_public_dma.h +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright (c) 2006-2010 Trusted Logic S.A. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef __SCX_PUBLIC_DMA_H -#define __SCX_PUBLIC_DMA_H - -#include <linux/dma-mapping.h> -#include <plat/dma.h> -#include <plat/dma-44xx.h> - -#include "scx_public_crypto.h" - -/*--------------------------------------------------------------------------- - * Cache management (implemented in the assembler file) - *-------------------------------------------------------------------------- */ - -u32 v7_dma_flush_range(u32 nVAStart, u32 nVAEnd); -u32 v7_dma_inv_range(u32 nVAStart, u32 nVAEnd); - -/*-------------------------------------------------------------------------- */ -/* - * Public DMA API - */ - -/* - * CEN Masks - */ -#define DMA_CEN_Elts_per_Frame_AES 4 -#define DMA_CEN_Elts_per_Frame_DES 2 -#define DMA_CEN_Elts_per_Frame_SHA 16 - -/* - * Request a DMA channel - */ -u32 scxPublicDMARequest(int *lch); - -/* - * Release a DMA channel - */ -u32 scxPublicDMARelease(int lch); - -/** - * This function waits for the DMA IRQ. - */ -void scxPublicDMAWait(int nr_of_cb); - -/* - * This function starts a DMA operation. - * - * lch DMA channel ID. - * interruptMask Configures the Channel Interrupt Control Register. 
- */ -void scxPublicDMAStart(int lch, int interruptMask); - -void scxPublicSetDMAChannelCommonParams( - struct omap_dma_channel_params *pDMAChannel, - u32 nbBlocks, u32 nbElements, u32 nDstStart, - u32 nSrcStart, u32 nTriggerID); -void scxPublicDMASetParams(int lch, struct omap_dma_channel_params *pParams); -void scxPublicDMADisableChannel(int lch); -void scxPublicDMAClearChannel(int lch); - -#endif /*__SCX_PUBLIC_DMA_H */ diff --git a/security/smc/omap4/scxlnx_comm_mshield.c b/security/smc/omap4/scxlnx_comm_mshield.c deleted file mode 100644 index ccd2098..0000000 --- a/security/smc/omap4/scxlnx_comm_mshield.c +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright (c) 2010 Trusted Logic S.A. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, - * MA 02111-1307 USA - */ - -#include <asm/div64.h> -#include <asm/system.h> -#include <asm/cputype.h> -#include <linux/uaccess.h> -#include <linux/io.h> -#include <linux/interrupt.h> -#include <linux/page-flags.h> -#include <linux/pagemap.h> -#include <linux/vmalloc.h> -#include <linux/version.h> -#include <linux/jiffies.h> -#include <linux/dma-mapping.h> -#include <linux/cpu.h> - -#include <asm/cacheflush.h> - -#include <clockdomain.h> - -#include "scxlnx_defs.h" - -#ifdef CONFIG_HAS_WAKELOCK -static struct wake_lock g_tf_wake_lock; -static atomic_t tf_wake_lock_count = ATOMIC_INIT(0); -#endif - -static struct clockdomain *smc_l4_sec_clkdm; -static atomic_t smc_l4_sec_clkdm_use_count = ATOMIC_INIT(0); - -static int __init tf_early_init(void) -{ - smc_l4_sec_clkdm = clkdm_lookup("l4_secure_clkdm"); - if (smc_l4_sec_clkdm == NULL) - return -EFAULT; - -#ifdef CONFIG_HAS_WAKELOCK - wake_lock_init(&g_tf_wake_lock, WAKE_LOCK_SUSPEND, - SCXLNX_DEVICE_BASE_NAME); -#endif - - return 0; -} -early_initcall(tf_early_init); - -/*-------------------------------------------------------------------------- - * L4 SEC Clock domain handling - *-------------------------------------------------------------------------- */ - -void tf_l4sec_clkdm_wakeup(bool use_spin_lock, bool wakelock) -{ - if (use_spin_lock) - spin_lock(&SCXLNXGetDevice()->sm.lock); -#ifdef CONFIG_HAS_WAKELOCK - if (wakelock) { - atomic_inc(&tf_wake_lock_count); - wake_lock(&g_tf_wake_lock); - } -#endif - atomic_inc(&smc_l4_sec_clkdm_use_count); - clkdm_wakeup(smc_l4_sec_clkdm); - if (use_spin_lock) - spin_unlock(&SCXLNXGetDevice()->sm.lock); -} - -void tf_l4sec_clkdm_allow_idle(bool use_spin_lock, bool wakeunlock) -{ - if (use_spin_lock) - spin_lock(&SCXLNXGetDevice()->sm.lock); - if 
(atomic_dec_return(&smc_l4_sec_clkdm_use_count) == 0) - clkdm_allow_idle(smc_l4_sec_clkdm); -#ifdef CONFIG_HAS_WAKELOCK - if (wakeunlock) - if (atomic_dec_return(&tf_wake_lock_count) == 0) - wake_unlock(&g_tf_wake_lock); -#endif - if (use_spin_lock) - spin_unlock(&SCXLNXGetDevice()->sm.lock); -} - diff --git a/security/smc/omap4/scxlnx_device.c b/security/smc/omap4/scxlnx_device.c deleted file mode 100644 index cd9d56b..0000000 --- a/security/smc/omap4/scxlnx_device.c +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright (c) 2006-2010 Trusted Logic S.A. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, - * MA 02111-1307 USA - */ - -#include <asm/atomic.h> -#include <linux/uaccess.h> -#include <linux/module.h> -#include <linux/errno.h> -#include <linux/mm.h> -#include <linux/page-flags.h> -#include <linux/pm.h> -#include <linux/sysdev.h> -#include <linux/vmalloc.h> -#include <linux/signal.h> -#ifdef CONFIG_ANDROID -#include <linux/device.h> -#endif - -#include "scx_protocol.h" -#include "scxlnx_defs.h" -#include "scxlnx_util.h" -#ifdef CONFIG_TF_MSHIELD -#include <plat/cpu.h> -#include "scx_public_crypto.h" -#endif - -/* The single device supported by this driver */ -static struct SCXLNX_DEVICE g_SCXLNXDevice = {0, }; - -/*---------------------------------------------------------------------------- - * Implementations - *----------------------------------------------------------------------------*/ - -struct SCXLNX_DEVICE *SCXLNXGetDevice(void) -{ - return &g_SCXLNXDevice; -} - -/*----------------------------------------------------------------------------*/ - -static int __init register_dmcrypt_engines(void) -{ - int ret; - - printk(KERN_INFO "Entered register_dmcrypt_engines"); - - ret = SCXPublicCryptoInit(); - if (ret) { - printk(KERN_ERR "register_dmcrypt_engines():" - " SCXPublicCryptoInit failed, (error %d)!\n", ret); - goto out; - } - - ret = register_smc_public_crypto_aes(); - if (ret) { - printk(KERN_ERR "register_dmcrypt_engines():" - " regiser_smc_public_crypto_aes failed, (error %d)!\n", ret); - goto out; - } - - ret = register_smc_public_crypto_digest(); - if (ret) { - printk(KERN_ERR "register_dmcrypt_engines():" - " regiser_smc_public_crypto_digest failed, (error %d)!\n", ret); - goto out; - } - -out: - return ret; -} -module_init(register_dmcrypt_engines); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Trusted Logic S.A."); diff --git 
a/security/smc/rproc_drm.c b/security/smc/rproc_drm.c new file mode 100644 index 0000000..b86b0b8 --- /dev/null +++ b/security/smc/rproc_drm.c @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2011 Texas Instruments, Inc. + * Copyright (c) 2011 Trusted Logic S.A. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* + * This file implements the non-secure rproc and smc interface/integration + */ + +#include <linux/types.h> +#include <linux/module.h> + +#include "tee_client_api.h" +#include "tf_defs.h" + +/* 7B1DD682-1077-4939-9755-B6192C5CC5FD */ +#define WVDRM_UUID {0x7B1DD682, 0x1077, 0x4939, \ + {0x97, 0x55, 0xB6, 0x19, 0x2C, 0x5C, 0xC5, 0xFD} } + +#define WVDRM_ENTER_SECURE_PLAYBACK 0x00003000 + +#define WVDRM_EXIT_SECURE_PLAYBACK 0x00003001 + +enum rproc_drm_s_state { + RPROC_DRM_SECURE_LEAVE, + RPROC_DRM_SECURE_ENTER +}; + +static enum rproc_drm_s_state s_state; + +static TEEC_Result rproc_drm_initialize(TEEC_Context *teec_context, + TEEC_Session *teec_session) +{ + static const TEEC_UUID drm_uuid = WVDRM_UUID; + static u32 drm_gid = 1019; + TEEC_Result result; + + result = TEEC_InitializeContext(NULL, teec_context); + if (result != TEEC_SUCCESS) + goto exit; + + result = TEEC_OpenSession(teec_context, teec_session, &drm_uuid, + TEEC_LOGIN_PRIVILEGED, &drm_gid, NULL, NULL); + if (result != TEEC_SUCCESS) + TEEC_FinalizeContext(teec_context); + +exit: + return result; +} + +static TEEC_Result rproc_drm_finalize(TEEC_Context *teec_context, + TEEC_Session *teec_session) +{ + TEEC_CloseSession(teec_session); + TEEC_FinalizeContext(teec_context); + return TEEC_SUCCESS; +} + 
+static TEEC_Result _rproc_drm_invoke_secure_service(bool enable) +{ + TEEC_Result result; + TEEC_Operation operation; + TEEC_Context teec_context; + TEEC_Session teec_session; + u32 command; + + result = rproc_drm_initialize(&teec_context, &teec_session); + if (result != TEEC_SUCCESS) + goto out; + + operation.paramTypes = TEEC_PARAM_TYPES(TEEC_NONE, TEEC_NONE, + TEEC_NONE, TEEC_NONE); + command = (enable ? WVDRM_ENTER_SECURE_PLAYBACK : + WVDRM_EXIT_SECURE_PLAYBACK); + result = TEEC_InvokeCommand(&teec_session, command, &operation, NULL); + rproc_drm_finalize(&teec_context, &teec_session); +out: + return result; +} + +int rproc_drm_invoke_service(bool enable) +{ + int ret; + + if ((s_state == RPROC_DRM_SECURE_ENTER && enable) || + (s_state == RPROC_DRM_SECURE_LEAVE && !enable)) + return 0; + + ret = _rproc_drm_invoke_secure_service(enable); + s_state = (enum rproc_drm_s_state) enable; + + return ret == TEEC_SUCCESS ? 0 : -EACCES; +} +EXPORT_SYMBOL(rproc_drm_invoke_service); diff --git a/security/smc/s_version.h b/security/smc/s_version.h new file mode 100644 index 0000000..a16d548 --- /dev/null +++ b/security/smc/s_version.h @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2010 Trusted Logic S.A. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#ifndef __S_VERSION_H__ +#define __S_VERSION_H__ + +/* + * Usage: define S_VERSION_BUILD on the compiler's command line. + * + * Then set: + * - S_VERSION_OS + * - S_VERSION_PLATFORM + * - S_VERSION_MAIN + * - S_VERSION_ENG is optional + * - S_VERSION_PATCH is optional + * - S_VERSION_BUILD = 0 if S_VERSION_BUILD not defined or empty + */ + +#define S_VERSION_OS "A" /* "A" for all Android */ +#define S_VERSION_PLATFORM "G" /* "G" for 4430 */ + +/* + * This version number must be updated for each new release + */ +#define S_VERSION_MAIN "01.04" + +/* +* If this is a patch or engineering version use the following +* defines to set the version number. Else set these values to 0. +*/ +#define S_VERSION_PATCH 6 +#define S_VERSION_ENG 0 + +#ifdef S_VERSION_BUILD +/* TRICK: detect if S_VERSION is defined but empty */ +#if 0 == S_VERSION_BUILD-0 +#undef S_VERSION_BUILD +#define S_VERSION_BUILD 0 +#endif +#else +/* S_VERSION_BUILD is not defined */ +#define S_VERSION_BUILD 0 +#endif + +#define __STRINGIFY(X) #X +#define __STRINGIFY2(X) __STRINGIFY(X) + +#if S_VERSION_ENG != 0 +#define _S_VERSION_ENG "e" __STRINGIFY2(S_VERSION_ENG) +#else +#define _S_VERSION_ENG "" +#endif + +#if S_VERSION_PATCH != 0 +#define _S_VERSION_PATCH "p" __STRINGIFY2(S_VERSION_PATCH) +#else +#define _S_VERSION_PATCH "" +#endif + +#if !defined(NDEBUG) || defined(_DEBUG) +#define S_VERSION_VARIANT "D " +#else +#define S_VERSION_VARIANT " " +#endif + +#define S_VERSION_STRING \ + "SMC" \ + S_VERSION_OS \ + S_VERSION_PLATFORM \ + S_VERSION_MAIN \ + _S_VERSION_PATCH \ + _S_VERSION_ENG \ + "." 
__STRINGIFY2(S_VERSION_BUILD) " " \ + S_VERSION_VARIANT + +#endif /* __S_VERSION_H__ */ diff --git a/security/smc/tee_client_api.h b/security/smc/tee_client_api.h new file mode 100644 index 0000000..d57be69 --- /dev/null +++ b/security/smc/tee_client_api.h @@ -0,0 +1,189 @@ +/** + * Copyright (c) 2011 Trusted Logic S.A. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +/* + * This header file corresponds to V1.0 of the GlobalPlatform + * TEE Client API Specification + */ +#ifndef __TEE_CLIENT_API_H__ +#define __TEE_CLIENT_API_H__ + +#include <linux/types.h> + +#ifndef TEEC_EXPORT +#define TEEC_EXPORT +#endif + +/* The header tee_client_api_imp.h must define implementation-dependent + types, constants and macros. 
+ + The implementation-dependent types are: + - TEEC_Context_IMP + - TEEC_Session_IMP + - TEEC_SharedMemory_IMP + - TEEC_Operation_IMP + + The implementation-dependent constants are: + - TEEC_CONFIG_SHAREDMEM_MAX_SIZE + The implementation-dependent macros are: + - TEEC_PARAM_TYPES +*/ +#include "tee_client_api_imp.h" + +/* Type definitions */ +typedef struct TEEC_Context +{ + TEEC_Context_IMP imp; +} TEEC_Context; + +typedef struct TEEC_Session +{ + TEEC_Session_IMP imp; +} TEEC_Session; + +typedef struct TEEC_SharedMemory +{ + void* buffer; + size_t size; + uint32_t flags; + TEEC_SharedMemory_IMP imp; +} TEEC_SharedMemory; + +typedef struct +{ + void* buffer; + size_t size; +} TEEC_TempMemoryReference; + +typedef struct +{ + TEEC_SharedMemory * parent; + size_t size; + size_t offset; +} TEEC_RegisteredMemoryReference; + +typedef struct +{ + uint32_t a; + uint32_t b; +} TEEC_Value; + +typedef union +{ + TEEC_TempMemoryReference tmpref; + TEEC_RegisteredMemoryReference memref; + TEEC_Value value; +} TEEC_Parameter; + +typedef struct TEEC_Operation +{ + volatile uint32_t started; + uint32_t paramTypes; + TEEC_Parameter params[4]; + TEEC_Operation_IMP imp; +} TEEC_Operation; + +#define TEEC_SUCCESS ((TEEC_Result)0x00000000) +#define TEEC_ERROR_GENERIC ((TEEC_Result)0xFFFF0000) +#define TEEC_ERROR_ACCESS_DENIED ((TEEC_Result)0xFFFF0001) +#define TEEC_ERROR_CANCEL ((TEEC_Result)0xFFFF0002) +#define TEEC_ERROR_ACCESS_CONFLICT ((TEEC_Result)0xFFFF0003) +#define TEEC_ERROR_EXCESS_DATA ((TEEC_Result)0xFFFF0004) +#define TEEC_ERROR_BAD_FORMAT ((TEEC_Result)0xFFFF0005) +#define TEEC_ERROR_BAD_PARAMETERS ((TEEC_Result)0xFFFF0006) +#define TEEC_ERROR_BAD_STATE ((TEEC_Result)0xFFFF0007) +#define TEEC_ERROR_ITEM_NOT_FOUND ((TEEC_Result)0xFFFF0008) +#define TEEC_ERROR_NOT_IMPLEMENTED ((TEEC_Result)0xFFFF0009) +#define TEEC_ERROR_NOT_SUPPORTED ((TEEC_Result)0xFFFF000A) +#define TEEC_ERROR_NO_DATA ((TEEC_Result)0xFFFF000B) +#define TEEC_ERROR_OUT_OF_MEMORY ((TEEC_Result)0xFFFF000C) 
+#define TEEC_ERROR_BUSY ((TEEC_Result)0xFFFF000D) +#define TEEC_ERROR_COMMUNICATION ((TEEC_Result)0xFFFF000E) +#define TEEC_ERROR_SECURITY ((TEEC_Result)0xFFFF000F) +#define TEEC_ERROR_SHORT_BUFFER ((TEEC_Result)0xFFFF0010) + +#define TEEC_ORIGIN_API 0x00000001 +#define TEEC_ORIGIN_COMMS 0x00000002 +#define TEEC_ORIGIN_TEE 0x00000003 +#define TEEC_ORIGIN_TRUSTED_APP 0x00000004 + +#define TEEC_MEM_INPUT 0x00000001 +#define TEEC_MEM_OUTPUT 0x00000002 + +#define TEEC_NONE 0x0 +#define TEEC_VALUE_INPUT 0x1 +#define TEEC_VALUE_OUTPUT 0x2 +#define TEEC_VALUE_INOUT 0x3 +#define TEEC_MEMREF_TEMP_INPUT 0x5 +#define TEEC_MEMREF_TEMP_OUTPUT 0x6 +#define TEEC_MEMREF_TEMP_INOUT 0x7 +#define TEEC_MEMREF_WHOLE 0xC +#define TEEC_MEMREF_PARTIAL_INPUT 0xD +#define TEEC_MEMREF_PARTIAL_OUTPUT 0xE +#define TEEC_MEMREF_PARTIAL_INOUT 0xF + +#define TEEC_LOGIN_PUBLIC 0x00000000 +#define TEEC_LOGIN_USER 0x00000001 +#define TEEC_LOGIN_GROUP 0x00000002 +#define TEEC_LOGIN_APPLICATION 0x00000004 +#define TEEC_LOGIN_USER_APPLICATION 0x00000005 +#define TEEC_LOGIN_GROUP_APPLICATION 0x00000006 + +TEEC_Result TEEC_EXPORT TEEC_InitializeContext( + const char* name, + TEEC_Context* context); + +void TEEC_EXPORT TEEC_FinalizeContext( + TEEC_Context* context); + +TEEC_Result TEEC_EXPORT TEEC_RegisterSharedMemory( + TEEC_Context* context, + TEEC_SharedMemory* sharedMem); + +TEEC_Result TEEC_EXPORT TEEC_AllocateSharedMemory( + TEEC_Context* context, + TEEC_SharedMemory* sharedMem); + +void TEEC_EXPORT TEEC_ReleaseSharedMemory ( + TEEC_SharedMemory* sharedMem); + +TEEC_Result TEEC_EXPORT TEEC_OpenSession ( + TEEC_Context* context, + TEEC_Session* session, + const TEEC_UUID* destination, + uint32_t connectionMethod, + void* connectionData, + TEEC_Operation* operation, + uint32_t* errorOrigin); + +void TEEC_EXPORT TEEC_CloseSession ( + TEEC_Session* session); + +TEEC_Result TEEC_EXPORT TEEC_InvokeCommand( + TEEC_Session* session, + uint32_t commandID, + TEEC_Operation* operation, + uint32_t* 
errorOrigin); + +void TEEC_EXPORT TEEC_RequestCancellation( + TEEC_Operation* operation); + +#include "tee_client_api_ex.h" + +#endif /* __TEE_CLIENT_API_H__ */ diff --git a/security/smc/tee_client_api_ex.h b/security/smc/tee_client_api_ex.h new file mode 100644 index 0000000..4988904 --- /dev/null +++ b/security/smc/tee_client_api_ex.h @@ -0,0 +1,59 @@ +/** + * Copyright (c) 2011 Trusted Logic S.A. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +/* + * This header file contains extensions to the TEE Client API that are + * specific to the Trusted Foundations implementations + */ +#ifndef __TEE_CLIENT_API_EX_H__ +#define __TEE_CLIENT_API_EX_H__ + +#include <linux/types.h> + +/* Implementation-defined login types */ +#define TEEC_LOGIN_AUTHENTICATION 0x80000000 +#define TEEC_LOGIN_PRIVILEGED 0x80000002 + +/* Type definitions */ + +typedef u64 TEEC_TimeLimit; + +void TEEC_EXPORT TEEC_GetTimeLimit( + TEEC_Context* context, + uint32_t timeout, + TEEC_TimeLimit* timeLimit); + +TEEC_Result TEEC_EXPORT TEEC_OpenSessionEx ( + TEEC_Context* context, + TEEC_Session* session, + const TEEC_TimeLimit* timeLimit, + const TEEC_UUID* destination, + uint32_t connectionMethod, + void* connectionData, + TEEC_Operation* operation, + uint32_t* errorOrigin); + +TEEC_Result TEEC_EXPORT TEEC_InvokeCommandEx( + TEEC_Session* session, + const 
TEEC_TimeLimit* timeLimit, + uint32_t commandID, + TEEC_Operation* operation, + uint32_t* errorOrigin); + +#endif /* __TEE_CLIENT_API_EX_H__ */ diff --git a/security/smc/tee_client_api_imp.h b/security/smc/tee_client_api_imp.h new file mode 100644 index 0000000..3073d63 --- /dev/null +++ b/security/smc/tee_client_api_imp.h @@ -0,0 +1,68 @@ +/** + * Copyright (c) 2011 Trusted Logic S.A. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +/* + * This header file defines the implementation-dependent types, + * constants and macros for all the Trusted Foundations implementations + * of the TEE Client API + */ +#ifndef __TEE_CLIENT_API_IMP_H__ +#define __TEE_CLIENT_API_IMP_H__ + +#include <linux/types.h> + +typedef u32 TEEC_Result; + +typedef struct TEEC_UUID +{ + uint32_t time_low; + uint16_t time_mid; + uint16_t time_hi_and_version; + uint8_t clock_seq_and_node[8]; +} TEEC_UUID; + +typedef struct { + struct tf_connection *_connection; +} TEEC_Context_IMP; + +typedef struct { + struct TEEC_Context* _context; + u32 _client_session; +} TEEC_Session_IMP; + +typedef struct { + struct TEEC_Context* _context; + u32 _block; + bool _allocated; +} TEEC_SharedMemory_IMP; + +typedef struct { + struct TEEC_Session* _pSession; +} TEEC_Operation_IMP; + +/* There is no natural, compile-time limit on the shared memory, but a specific + implementation 
may introduce a limit (in particular on TrustZone) */ +#define TEEC_CONFIG_SHAREDMEM_MAX_SIZE ((size_t)0xFFFFFFFF) + +#define TEEC_PARAM_TYPES(entry0Type, entry1Type, entry2Type, entry3Type) \ + ((entry0Type) | ((entry1Type) << 4) | \ + ((entry2Type) << 8) | ((entry3Type) << 12)) + + +#endif /* __TEE_CLIENT_API_IMP_H__ */ diff --git a/security/smc/tf_comm.c b/security/smc/tf_comm.c new file mode 100644 index 0000000..79b4034 --- /dev/null +++ b/security/smc/tf_comm.c @@ -0,0 +1,1746 @@ +/** + * Copyright (c) 2011 Trusted Logic S.A. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#include <asm/div64.h> +#include <asm/system.h> +#include <linux/version.h> +#include <asm/cputype.h> +#include <linux/interrupt.h> +#include <linux/page-flags.h> +#include <linux/pagemap.h> +#include <linux/vmalloc.h> +#include <linux/jiffies.h> +#include <linux/freezer.h> + +#include "tf_defs.h" +#include "tf_comm.h" +#include "tf_protocol.h" +#include "tf_util.h" +#include "tf_conn.h" + +#ifdef CONFIG_TF_ZEBRA +#include "tf_zebra.h" +#endif + +/*--------------------------------------------------------------------------- + * Internal Constants + *---------------------------------------------------------------------------*/ + +/* + * shared memories descriptor constants + */ +#define DESCRIPTOR_B_MASK (1 << 2) +#define DESCRIPTOR_C_MASK (1 << 3) +#define DESCRIPTOR_S_MASK (1 << 10) + +#define L1_COARSE_DESCRIPTOR_BASE (0x00000001) +#define L1_COARSE_DESCRIPTOR_ADDR_MASK (0xFFFFFC00) +#define L1_COARSE_DESCRIPTOR_V13_12_SHIFT (5) + +#define L2_PAGE_DESCRIPTOR_BASE (0x00000003) +#define L2_PAGE_DESCRIPTOR_AP_APX_READ (0x220) +#define L2_PAGE_DESCRIPTOR_AP_APX_READ_WRITE (0x30) + +#define L2_INIT_DESCRIPTOR_BASE (0x00000003) +#define L2_INIT_DESCRIPTOR_V13_12_SHIFT (4) + +/* + * Reject an attempt to share a strongly-Ordered or Device memory + * Strongly-Ordered: TEX=0b000, C=0, B=0 + * Shared Device: TEX=0b000, C=0, B=1 + * Non-Shared Device: TEX=0b010, C=0, B=0 + */ +#define L2_TEX_C_B_MASK \ + ((1<<8) | (1<<7) | (1<<6) | (1<<3) | (1<<2)) +#define L2_TEX_C_B_STRONGLY_ORDERED \ + ((0<<8) | (0<<7) | (0<<6) | (0<<3) | (0<<2)) +#define L2_TEX_C_B_SHARED_DEVICE \ + ((0<<8) | (0<<7) | (0<<6) | (0<<3) | (1<<2)) +#define L2_TEX_C_B_NON_SHARED_DEVICE \ + ((0<<8) | (1<<7) | (0<<6) | (0<<3) | (0<<2)) + +#define CACHE_S(x) ((x) & (1 << 24)) +#define 
CACHE_DSIZE(x) (((x) >> 12) & 4095) + +#define TIME_IMMEDIATE ((u64) 0x0000000000000000ULL) +#define TIME_INFINITE ((u64) 0xFFFFFFFFFFFFFFFFULL) + +/*--------------------------------------------------------------------------- + * atomic operation definitions + *---------------------------------------------------------------------------*/ + +/* + * Atomically updates the sync_serial_n and time_n register + * sync_serial_n and time_n modifications are thread safe + */ +void tf_set_current_time(struct tf_comm *comm) +{ + u32 new_sync_serial; + struct timeval now; + u64 time64; + + /* + * lock the structure while updating the L1 shared memory fields + */ + spin_lock(&comm->lock); + + /* read sync_serial_n and change the TimeSlot bit field */ + new_sync_serial = + tf_read_reg32(&comm->pBuffer->sync_serial_n) + 1; + + do_gettimeofday(&now); + time64 = now.tv_sec; + time64 = (time64 * 1000) + (now.tv_usec / 1000); + + /* Write the new time64 and nSyncSerial into shared memory */ + tf_write_reg64(&comm->pBuffer->time_n[new_sync_serial & + TF_SYNC_SERIAL_TIMESLOT_N], time64); + tf_write_reg32(&comm->pBuffer->sync_serial_n, + new_sync_serial); + + spin_unlock(&comm->lock); +} + +/* + * Performs the specific read timeout operation + * The difficulty here is to read atomically 2 u32 + * values from the L1 shared buffer. 
+ * This is guaranteed by reading before and after the operation + * the timeslot given by the Secure World + */ +static inline void tf_read_timeout(struct tf_comm *comm, u64 *time) +{ + u32 sync_serial_s_initial = 0; + u32 sync_serial_s_final = 1; + u64 time64; + + spin_lock(&comm->lock); + + while (sync_serial_s_initial != sync_serial_s_final) { + sync_serial_s_initial = tf_read_reg32( + &comm->pBuffer->sync_serial_s); + time64 = tf_read_reg64( + &comm->pBuffer->timeout_s[sync_serial_s_initial&1]); + + sync_serial_s_final = tf_read_reg32( + &comm->pBuffer->sync_serial_s); + } + + spin_unlock(&comm->lock); + + *time = time64; +} + +/*---------------------------------------------------------------------------- + * SIGKILL signal handling + *----------------------------------------------------------------------------*/ + +static bool sigkill_pending(void) +{ + if (signal_pending(current)) { + dprintk(KERN_INFO "A signal is pending\n"); + if (sigismember(¤t->pending.signal, SIGKILL)) { + dprintk(KERN_INFO "A SIGKILL is pending\n"); + return true; + } else if (sigismember( + ¤t->signal->shared_pending.signal, SIGKILL)) { + dprintk(KERN_INFO "A SIGKILL is pending (shared)\n"); + return true; + } + } + return false; +} + +/*---------------------------------------------------------------------------- + * Shared memory related operations + *----------------------------------------------------------------------------*/ + +struct tf_coarse_page_table *tf_alloc_coarse_page_table( + struct tf_coarse_page_table_allocation_context *alloc_context, + u32 type) +{ + struct tf_coarse_page_table *coarse_pg_table = NULL; + + spin_lock(&(alloc_context->lock)); + + if (!(list_empty(&(alloc_context->free_coarse_page_tables)))) { + /* + * The free list can provide us a coarse page table + * descriptor + */ + coarse_pg_table = list_first_entry( + &alloc_context->free_coarse_page_tables, + struct tf_coarse_page_table, list); + list_del(&(coarse_pg_table->list)); + + 
		coarse_pg_table->parent->ref_count++;
	} else {
		/* no array of coarse page tables, create a new one */
		struct tf_coarse_page_table_array *array;
		void *page;
		int i;

		/* drop the lock: the allocations below may sleep */
		spin_unlock(&(alloc_context->lock));

		/* first allocate a new page descriptor */
		array = internal_kmalloc(sizeof(*array), GFP_KERNEL);
		if (array == NULL) {
			dprintk(KERN_ERR "tf_alloc_coarse_page_table(%p):"
				" failed to allocate a table array\n",
				alloc_context);
			return NULL;
		}

		array->type = type;
		INIT_LIST_HEAD(&(array->list));

		/* now allocate the actual page the page descriptor describes */
		page = (void *) internal_get_zeroed_page(GFP_KERNEL);
		if (page == NULL) {
			dprintk(KERN_ERR "tf_alloc_coarse_page_table(%p):"
				" failed allocate a page\n",
				alloc_context);
			internal_kfree(array);
			return NULL;
		}

		spin_lock(&(alloc_context->lock));

		/* initialize the coarse page table descriptors */
		for (i = 0; i < 4; i++) {
			INIT_LIST_HEAD(&(array->coarse_page_tables[i].list));
			array->coarse_page_tables[i].descriptors =
				page + (i * SIZE_1KB);
			array->coarse_page_tables[i].parent = array;

			if (i == 0) {
				/*
				 * the first element is kept for the current
				 * coarse page table allocation
				 */
				coarse_pg_table =
					&(array->coarse_page_tables[i]);
				/*
				 * NOTE(review): ref_count is incremented here
				 * without a prior explicit assignment; this
				 * assumes internal_kmalloc() returns zeroed
				 * memory -- TODO confirm, otherwise ref_count
				 * starts from an indeterminate value.
				 */
				array->ref_count++;
			} else {
				/*
				 * The other elements are added to the free list
				 */
				list_add(&(array->coarse_page_tables[i].list),
					&(alloc_context->
						free_coarse_page_tables));
			}
		}

		list_add(&(array->list),
			&(alloc_context->coarse_page_table_arrays));
	}
	spin_unlock(&(alloc_context->lock));

	return coarse_pg_table;
}


/*
 * Releases one coarse page table descriptor back to the allocator.
 * When the last descriptor of a 4-entry array is released, the backing
 * page and the array itself are freed -- unless the array was
 * preallocated and 'force' is 0, in which case the descriptor is simply
 * returned to the free list for reuse.
 */
void tf_free_coarse_page_table(
	struct tf_coarse_page_table_allocation_context *alloc_context,
	struct tf_coarse_page_table *coarse_pg_table,
	int force)
{
	struct tf_coarse_page_table_array *array;

	spin_lock(&(alloc_context->lock));

	array = coarse_pg_table->parent;

	(array->ref_count)--;

	if (array->ref_count == 0) {
		/*
		 * no coarse page table descriptor is used
		 * check if we should free the whole page
		 */

		if ((array->type == TF_PAGE_DESCRIPTOR_TYPE_PREALLOCATED)
			&& (force == 0))
			/*
			 * This is a preallocated page,
			 * add the page back to the free list
			 */
			list_add(&(coarse_pg_table->list),
				&(alloc_context->free_coarse_page_tables));
		else {
			/*
			 * None of the page's coarse page table descriptors
			 * are in use, free the whole page
			 */
			int i;
			u32 *descriptors;

			/*
			 * remove the page's associated coarse page table
			 * descriptors from the free list
			 */
			for (i = 0; i < 4; i++)
				if (&(array->coarse_page_tables[i]) !=
						coarse_pg_table)
					list_del(&(array->
						coarse_page_tables[i].list));

			descriptors =
				array->coarse_page_tables[0].descriptors;
			array->coarse_page_tables[0].descriptors = NULL;

			/* remove the coarse page table from the array  */
			list_del(&(array->list));

			/* drop the lock: freeing pages may sleep */
			spin_unlock(&(alloc_context->lock));
			/*
			 * Free the page.
			 * The address of the page is contained in the first
			 * element
			 */
			internal_free_page((unsigned long) descriptors);
			/* finaly free the array */
			internal_kfree(array);

			spin_lock(&(alloc_context->lock));
		}
	} else {
		/*
		 * Some coarse page table descriptors are in use.
		 * Add the descriptor to the free list
		 */
		list_add(&(coarse_pg_table->list),
			&(alloc_context->free_coarse_page_tables));
	}

	spin_unlock(&(alloc_context->lock));
}


/* Initializes the allocator's lock and its two bookkeeping lists. */
void tf_init_coarse_page_table_allocator(
	struct tf_coarse_page_table_allocation_context *alloc_context)
{
	spin_lock_init(&(alloc_context->lock));
	INIT_LIST_HEAD(&(alloc_context->coarse_page_table_arrays));
	INIT_LIST_HEAD(&(alloc_context->free_coarse_page_tables));
}

/* Frees every remaining coarse page table array and its backing page. */
void tf_release_coarse_page_table_allocator(
	struct tf_coarse_page_table_allocation_context *alloc_context)
{
	spin_lock(&(alloc_context->lock));

	/* now clean up the list of page descriptors */
	while (!list_empty(&(alloc_context->coarse_page_table_arrays))) {
		struct tf_coarse_page_table_array *page_desc;
		u32 *descriptors;

		page_desc = list_first_entry(
			&alloc_context->coarse_page_table_arrays,
			struct tf_coarse_page_table_array, list);

		descriptors = page_desc->coarse_page_tables[0].descriptors;
		list_del(&(page_desc->list));

		/* drop the lock while freeing: may sleep */
		spin_unlock(&(alloc_context->lock));

		if (descriptors != NULL)
			internal_free_page((unsigned long)descriptors);

		internal_kfree(page_desc);

		spin_lock(&(alloc_context->lock));
	}

	spin_unlock(&(alloc_context->lock));
}

/*
 * Returns the L1 coarse page descriptor for
 * a coarse page table located at address coarse_pg_table_descriptors
 */
u32 tf_get_l1_coarse_descriptor(
	u32 coarse_pg_table_descriptors[256])
{
	u32 descriptor = L1_COARSE_DESCRIPTOR_BASE;
	unsigned int info = read_cpuid(CPUID_CACHETYPE);

	descriptor |= (virt_to_phys((void *) coarse_pg_table_descriptors)
		& L1_COARSE_DESCRIPTOR_ADDR_MASK);

	if (CACHE_S(info) && (CACHE_DSIZE(info) & (1 << 11))) {
		dprintk(KERN_DEBUG "tf_get_l1_coarse_descriptor "
			"V31-12 added to descriptor\n");
		/* the 16k alignment restriction applies */
		descriptor |= (DESCRIPTOR_V13_12_GET(
			(u32)coarse_pg_table_descriptors) <<
				L1_COARSE_DESCRIPTOR_V13_12_SHIFT);
	}

	return
		descriptor;
}


#define dprintk_desc(...)
/*
 * Returns the L2 descriptor for the specified user page.
 * Walks the pgd/pud/pmd/pte chain of 'mm' for 'vaddr' and rebuilds the
 * B/C/S/TEX attribute bits; returns 0 on any walk failure.
 */
u32 tf_get_l2_descriptor_common(u32 vaddr, struct mm_struct *mm)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep;
	u32 *hwpte;
	u32 tex = 0;
	u32 descriptor = 0;

	dprintk_desc(KERN_INFO "VirtAddr = %x\n", vaddr);
	pgd = pgd_offset(mm, vaddr);
	dprintk_desc(KERN_INFO "pgd = %x, value=%x\n", (unsigned int) pgd,
		(unsigned int) *pgd);
	if (pgd_none(*pgd))
		goto error;
	pud = pud_offset(pgd, vaddr);
	dprintk_desc(KERN_INFO "pud = %x, value=%x\n", (unsigned int) pud,
		(unsigned int) *pud);
	if (pud_none(*pud))
		goto error;
	pmd = pmd_offset(pud, vaddr);
	dprintk_desc(KERN_INFO "pmd = %x, value=%x\n", (unsigned int) pmd,
		(unsigned int) *pmd);
	if (pmd_none(*pmd))
		goto error;

	if (PMD_TYPE_SECT&(*pmd)) {
		/* We have a section */
		dprintk_desc(KERN_INFO "Section descr=%x\n",
			(unsigned int)*pmd);
		if ((*pmd) & PMD_SECT_BUFFERABLE)
			descriptor |= DESCRIPTOR_B_MASK;
		if ((*pmd) & PMD_SECT_CACHEABLE)
			descriptor |= DESCRIPTOR_C_MASK;
		if ((*pmd) & PMD_SECT_S)
			descriptor |= DESCRIPTOR_S_MASK;
		tex = ((*pmd) >> 12) & 7;
	} else {
		/* We have a table */
		ptep = pte_offset_map(pmd, vaddr);
		if (pte_present(*ptep)) {
			dprintk_desc(KERN_INFO "L2 descr=%x\n",
				(unsigned int) *ptep);
			if ((*ptep) & L_PTE_MT_BUFFERABLE)
				descriptor |= DESCRIPTOR_B_MASK;
			if ((*ptep) & L_PTE_MT_WRITETHROUGH)
				descriptor |= DESCRIPTOR_C_MASK;
			if ((*ptep) & L_PTE_MT_DEV_SHARED)
				descriptor |= DESCRIPTOR_S_MASK;

			/*
			 * Linux's pte doesn't keep track of TEX value.
			 * Have to jump to hwpte see include/asm/pgtable.h
			 *
			 * NOTE(review): the -0x800 offset assumes the ARM
			 * Linux pte layout where the hardware table sits
			 * 2KB before the Linux table -- verify against the
			 * kernel version this driver targets.
			 */
			hwpte = (u32 *) (((u32) ptep) - 0x800);
			if (((*hwpte) & L2_DESCRIPTOR_ADDR_MASK) !=
					((*ptep) & L2_DESCRIPTOR_ADDR_MASK))
				goto error;
			dprintk_desc(KERN_INFO "hw descr=%x\n", *hwpte);
			tex = ((*hwpte) >> 6) & 7;
			pte_unmap(ptep);
		} else {
			pte_unmap(ptep);
			goto error;
		}
	}

	descriptor |= (tex << 6);

	return descriptor;

error:
	dprintk(KERN_ERR "Error occured in %s\n", __func__);
	return 0;
}


/*
 * Changes an L2 page descriptor back to a pointer to a physical page
 */
inline struct page *tf_l2_page_descriptor_to_page(u32 l2_page_descriptor)
{
	return pte_page(l2_page_descriptor & L2_DESCRIPTOR_ADDR_MASK);
}

#define TF_DEFAULT_COMMON_DESCRIPTORS 0x0000044C

/*
 * Returns the L1 descriptor for the 1KB-aligned coarse page table. The address
 * must be in the kernel address space.
 * On entry *l2_page_descriptor holds a (struct page *); on exit it holds
 * the corresponding hardware L2 page descriptor (or L2_DESCRIPTOR_FAULT
 * for VM_IO mappings, which are rejected).
 */
static void tf_get_l2_page_descriptor(
	u32 *l2_page_descriptor,
	u32 flags, struct mm_struct *mm, struct vm_area_struct *vmas)
{
	u32 descriptor;
	struct page *page;

	dprintk(KERN_INFO
		"%s *l2_page_descriptor=%x vm_flags=%lx\n",
		__func__, *l2_page_descriptor, vmas->vm_flags);

	if (*l2_page_descriptor == L2_DESCRIPTOR_FAULT)
		return;

	if (vmas->vm_flags & VM_IO) {
		*l2_page_descriptor = L2_DESCRIPTOR_FAULT;
		dprintk(KERN_ERR "Memory mapped I/O or similar detected\n");
		return;
	}
	page = (struct page *) (*l2_page_descriptor);

	descriptor = TF_DEFAULT_COMMON_DESCRIPTORS;
	descriptor |= L2_PAGE_DESCRIPTOR_BASE;

	descriptor |= (page_to_phys(page) & L2_DESCRIPTOR_ADDR_MASK);

	if (!(flags & TF_SHMEM_TYPE_WRITE))
		/* only read access */
		descriptor |= L2_PAGE_DESCRIPTOR_AP_APX_READ;
	else
		/* read and write access */
		descriptor |= L2_PAGE_DESCRIPTOR_AP_APX_READ_WRITE;


	*l2_page_descriptor = descriptor;
}


/*
 * Unlocks the physical memory pages
 * and frees the coarse pages that need to
 * (marks each mapped page dirty and drops the reference taken when the
 * shared memory was registered)
 */
void tf_cleanup_shared_memory(
	struct tf_coarse_page_table_allocation_context *alloc_context,
	struct tf_shmem_desc *shmem_desc,
	u32 full_cleanup)
{
	u32 coarse_page_index;

	dprintk(KERN_INFO "tf_cleanup_shared_memory(%p)\n",
		shmem_desc);

#ifdef DEBUG_COARSE_TABLES
	printk(KERN_DEBUG "tf_cleanup_shared_memory "
		"- number of coarse page tables=%d\n",
		shmem_desc->coarse_pg_table_count);

	for (coarse_page_index = 0;
	     coarse_page_index < shmem_desc->coarse_pg_table_count;
	     coarse_page_index++) {
		u32 j;

		printk(KERN_DEBUG "  Descriptor=%p address=%p index=%d\n",
			shmem_desc->coarse_pg_table[coarse_page_index],
			shmem_desc->coarse_pg_table[coarse_page_index]->
				descriptors,
			coarse_page_index);
		if (shmem_desc->coarse_pg_table[coarse_page_index] != NULL) {
			for (j = 0;
			     j < TF_DESCRIPTOR_TABLE_CAPACITY;
			     j += 8) {
				int k;
				printk(KERN_DEBUG "   ");
				for (k = j; k < j + 8; k++)
					printk(KERN_DEBUG "%p ",
						shmem_desc->coarse_pg_table[
							coarse_page_index]->
								descriptors);
				printk(KERN_DEBUG "\n");
			}
		}
	}
	printk(KERN_DEBUG "tf_cleanup_shared_memory() - done\n\n");
#endif

	/* Parse the coarse page descriptors */
	for (coarse_page_index = 0;
	     coarse_page_index < shmem_desc->coarse_pg_table_count;
	     coarse_page_index++) {
		u32 j;
		u32 found = 0;

		/* parse the page descriptors of the coarse page */
		for (j = 0; j < TF_DESCRIPTOR_TABLE_CAPACITY; j++) {
			u32 l2_page_descriptor = (u32) (shmem_desc->
				coarse_pg_table[coarse_page_index]->
					descriptors[j]);

			if (l2_page_descriptor != L2_DESCRIPTOR_FAULT) {
				struct page *page =
					tf_l2_page_descriptor_to_page(
						l2_page_descriptor);

				if (!PageReserved(page))
					SetPageDirty(page);
				internal_page_cache_release(page);

				found = 1;
			} else if (found == 1) {
				/* past the last mapped page: stop early */
				break;
			}
		}

		/*
		 * Only free the coarse pages of descriptors not preallocated
		 */
		if ((shmem_desc->type == TF_SHMEM_TYPE_REGISTERED_SHMEM) ||
			(full_cleanup != 0))
			tf_free_coarse_page_table(alloc_context,
shmem_desc->coarse_pg_table[coarse_page_index], + 0); + } + + shmem_desc->coarse_pg_table_count = 0; + dprintk(KERN_INFO "tf_cleanup_shared_memory(%p) done\n", + shmem_desc); +} + +/* + * Make sure the coarse pages are allocated. If not allocated, do it Locks down + * the physical memory pages + * Verifies the memory attributes depending on flags + */ +int tf_fill_descriptor_table( + struct tf_coarse_page_table_allocation_context *alloc_context, + struct tf_shmem_desc *shmem_desc, + u32 buffer, + struct vm_area_struct **vmas, + u32 descriptors[TF_MAX_COARSE_PAGES], + u32 buffer_size, + u32 *buffer_start_offset, + bool in_user_space, + u32 flags, + u32 *descriptor_count) +{ + u32 coarse_page_index; + u32 coarse_page_count; + u32 page_count; + u32 page_shift = 0; + int error; + unsigned int info = read_cpuid(CPUID_CACHETYPE); + + dprintk(KERN_INFO "tf_fill_descriptor_table" + "(%p, buffer=0x%08X, size=0x%08X, user=%01x " + "flags = 0x%08x)\n", + shmem_desc, + buffer, + buffer_size, + in_user_space, + flags); + + /* + * Compute the number of pages + * Compute the number of coarse pages + * Compute the page offset + */ + page_count = ((buffer & ~PAGE_MASK) + + buffer_size + ~PAGE_MASK) >> PAGE_SHIFT; + + /* check whether the 16k alignment restriction applies */ + if (CACHE_S(info) && (CACHE_DSIZE(info) & (1 << 11))) + /* + * The 16k alignment restriction applies. 
+ * Shift data to get them 16k aligned + */ + page_shift = DESCRIPTOR_V13_12_GET(buffer); + page_count += page_shift; + + + /* + * Check the number of pages fit in the coarse pages + */ + if (page_count > (TF_DESCRIPTOR_TABLE_CAPACITY * + TF_MAX_COARSE_PAGES)) { + dprintk(KERN_ERR "tf_fill_descriptor_table(%p): " + "%u pages required to map shared memory!\n", + shmem_desc, page_count); + error = -ENOMEM; + goto error; + } + + /* coarse page describe 256 pages */ + coarse_page_count = ((page_count + + TF_DESCRIPTOR_TABLE_CAPACITY_MASK) >> + TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT); + + /* + * Compute the buffer offset + */ + *buffer_start_offset = (buffer & ~PAGE_MASK) | + (page_shift << PAGE_SHIFT); + + /* map each coarse page */ + for (coarse_page_index = 0; + coarse_page_index < coarse_page_count; + coarse_page_index++) { + u32 j; + struct tf_coarse_page_table *coarse_pg_table; + + /* compute a virtual address with appropriate offset */ + u32 buffer_offset_vaddr = buffer + + (coarse_page_index * TF_MAX_COARSE_PAGE_MAPPED_SIZE); + u32 pages_to_get; + + /* + * Compute the number of pages left for this coarse page. + * Decrement page_count each time + */ + pages_to_get = (page_count >> + TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT) ? 
+ TF_DESCRIPTOR_TABLE_CAPACITY : page_count; + page_count -= pages_to_get; + + /* + * Check if the coarse page has already been allocated + * If not, do it now + */ + if ((shmem_desc->type == TF_SHMEM_TYPE_REGISTERED_SHMEM) + || (shmem_desc->type == + TF_SHMEM_TYPE_PM_HIBERNATE)) { + coarse_pg_table = tf_alloc_coarse_page_table( + alloc_context, + TF_PAGE_DESCRIPTOR_TYPE_NORMAL); + + if (coarse_pg_table == NULL) { + dprintk(KERN_ERR + "tf_fill_descriptor_table(%p):" + " SCXLNXConnAllocateCoarsePageTable " + "failed for coarse page %d\n", + shmem_desc, coarse_page_index); + error = -ENOMEM; + goto error; + } + + shmem_desc->coarse_pg_table[coarse_page_index] = + coarse_pg_table; + } else { + coarse_pg_table = + shmem_desc->coarse_pg_table[coarse_page_index]; + } + + /* + * The page is not necessarily filled with zeroes. + * Set the fault descriptors ( each descriptor is 4 bytes long) + */ + memset(coarse_pg_table->descriptors, 0x00, + TF_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32)); + + if (in_user_space) { + int pages; + + /* + * TRICK: use pCoarsePageDescriptor->descriptors to + * hold the (struct page*) items before getting their + * physical address + */ + down_read(&(current->mm->mmap_sem)); + pages = internal_get_user_pages( + current, + current->mm, + buffer_offset_vaddr, + /* + * page_shift is cleared after retrieving first + * coarse page + */ + (pages_to_get - page_shift), + (flags & TF_SHMEM_TYPE_WRITE) ? 
1 : 0, + 0, + (struct page **) (coarse_pg_table->descriptors + + page_shift), + vmas); + up_read(&(current->mm->mmap_sem)); + + if ((pages <= 0) || + (pages != (pages_to_get - page_shift))) { + dprintk(KERN_ERR"tf_fill_descriptor_table:" + " get_user_pages got %d pages while " + "trying to get %d pages!\n", + pages, pages_to_get - page_shift); + error = -EFAULT; + goto error; + } + + for (j = page_shift; + j < page_shift + pages; + j++) { + /* Get the actual L2 descriptors */ + tf_get_l2_page_descriptor( + &coarse_pg_table->descriptors[j], + flags, + current->mm, + vmas[j]); + /* + * Reject Strongly-Ordered or Device Memory + */ +#define IS_STRONGLY_ORDERED_OR_DEVICE_MEM(x) \ + ((((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_STRONGLY_ORDERED) || \ + (((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_SHARED_DEVICE) || \ + (((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_NON_SHARED_DEVICE)) + + if (IS_STRONGLY_ORDERED_OR_DEVICE_MEM( + coarse_pg_table-> + descriptors[j])) { + dprintk(KERN_ERR + "tf_fill_descriptor_table:" + " descriptor 0x%08X use " + "strongly-ordered or device " + "memory. 
Rejecting!\n", + coarse_pg_table-> + descriptors[j]); + error = -EFAULT; + goto error; + } + } + } else { + /* Kernel-space memory */ + for (j = page_shift; + j < pages_to_get; + j++) { + void *addr = + (void *)(buffer_offset_vaddr + + (j - page_shift) * PAGE_SIZE); + if (!is_vmalloc_addr(addr)) { + dprintk(KERN_ERR + "tf_fill_descriptor_table: " + "cannot handle address %p\n", + addr); + goto error; + } + struct page *page = vmalloc_to_page(addr); + if (page == NULL) { + dprintk(KERN_ERR + "tf_fill_descriptor_table: " + "cannot map %p to page\n", + addr); + goto error; + } + coarse_pg_table->descriptors[j] = (u32)page; + get_page(page); + + /* change coarse page "page address" */ + tf_get_l2_page_descriptor( + &coarse_pg_table->descriptors[j], + flags, + &init_mm, + vmas[j]); + } + } + + dmac_flush_range((void *)coarse_pg_table->descriptors, + (void *)(((u32)(coarse_pg_table->descriptors)) + + TF_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32))); + + outer_clean_range( + __pa(coarse_pg_table->descriptors), + __pa(coarse_pg_table->descriptors) + + TF_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32)); + wmb(); + + /* Update the coarse page table address */ + descriptors[coarse_page_index] = + tf_get_l1_coarse_descriptor( + coarse_pg_table->descriptors); + + /* + * The next coarse page has no page shift, reset the + * page_shift + */ + page_shift = 0; + } + + *descriptor_count = coarse_page_count; + shmem_desc->coarse_pg_table_count = coarse_page_count; + +#ifdef DEBUG_COARSE_TABLES + printk(KERN_DEBUG "ntf_fill_descriptor_table - size=0x%08X " + "numberOfCoarsePages=%d\n", buffer_size, + shmem_desc->coarse_pg_table_count); + for (coarse_page_index = 0; + coarse_page_index < shmem_desc->coarse_pg_table_count; + coarse_page_index++) { + u32 j; + struct tf_coarse_page_table *coarse_page_table = + shmem_desc->coarse_pg_table[coarse_page_index]; + + printk(KERN_DEBUG " Descriptor=%p address=%p index=%d\n", + coarse_page_table, + coarse_page_table->descriptors, + coarse_page_index); + for 
(j = 0; + j < TF_DESCRIPTOR_TABLE_CAPACITY; + j += 8) { + int k; + printk(KERN_DEBUG " "); + for (k = j; k < j + 8; k++) + printk(KERN_DEBUG "0x%08X ", + coarse_page_table->descriptors[k]); + printk(KERN_DEBUG "\n"); + } + } + printk(KERN_DEBUG "ntf_fill_descriptor_table() - done\n\n"); +#endif + + return 0; + +error: + tf_cleanup_shared_memory( + alloc_context, + shmem_desc, + 0); + + return error; +} + + +/*---------------------------------------------------------------------------- + * Standard communication operations + *----------------------------------------------------------------------------*/ + +u8 *tf_get_description(struct tf_comm *comm) +{ + if (test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags))) + return comm->pBuffer->version_description; + + return NULL; +} + +/* + * Returns a non-zero value if the specified S-timeout has expired, zero + * otherwise. + * + * The placeholder referenced to by relative_timeout_jiffies gives the relative + * timeout from now in jiffies. It is set to zero if the S-timeout has expired, + * or to MAX_SCHEDULE_TIMEOUT if the S-timeout is infinite. 
 */
static int tf_test_s_timeout(
	u64 timeout,
	signed long *relative_timeout_jiffies)
{
	struct timeval now;
	u64 time64;

	*relative_timeout_jiffies = 0;

	/* immediate timeout */
	if (timeout == TIME_IMMEDIATE)
		return 1;

	/* infinite timeout */
	if (timeout == TIME_INFINITE) {
		dprintk(KERN_DEBUG "tf_test_s_timeout: "
			"timeout is infinite\n");
		*relative_timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
		return 0;
	}

	do_gettimeofday(&now);
	time64 = now.tv_sec;
	/* will not overflow as operations are done on 64bit values */
	time64 = (time64 * 1000) + (now.tv_usec / 1000);

	/* timeout expired */
	if (time64 >= timeout) {
		dprintk(KERN_DEBUG "tf_test_s_timeout: timeout expired\n");
		return 1;
	}

	/*
	 * finite timeout, compute relative_timeout_jiffies
	 */
	/* will not overflow as time64 < timeout */
	timeout -= time64;

	/* guarantee *relative_timeout_jiffies is a valid timeout */
	if ((timeout >> 32) != 0)
		*relative_timeout_jiffies = MAX_JIFFY_OFFSET;
	else
		*relative_timeout_jiffies =
			msecs_to_jiffies((unsigned int) timeout);

	dprintk(KERN_DEBUG "tf_test_s_timeout: timeout is 0x%lx\n",
		*relative_timeout_jiffies);
	return 0;
}

/*
 * Drains the L1 shared-memory answer queue: copies each completed answer
 * back into the tf_answer_struct referenced by its operation_id and marks
 * it as copied, then advances first_answer. Holds comm->lock throughout.
 */
static void tf_copy_answers(struct tf_comm *comm)
{
	u32 first_answer;
	u32 first_free_answer;
	struct tf_answer_struct *answerStructureTemp;

	if (test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags))) {
		spin_lock(&comm->lock);
		first_free_answer = tf_read_reg32(
			&comm->pBuffer->first_free_answer);
		first_answer = tf_read_reg32(
			&comm->pBuffer->first_answer);

		while (first_answer != first_free_answer) {
			/* answer queue not empty */
			union tf_answer sComAnswer;
			struct tf_answer_header header;

			/*
			 * the size of the command in words of 32bit, not in
			 * bytes
			 */
			u32 command_size;
			u32 i;
			u32 *temp = (uint32_t *) &header;

			dprintk(KERN_INFO
				"[pid=%d] tf_copy_answers(%p): "
				"Read answers from L1\n",
				current->pid, comm);

			/* Read the answer header */
			for (i = 0;
			     i < sizeof(struct tf_answer_header)/sizeof(u32);
			     i++)
				temp[i] = comm->pBuffer->answer_queue[
					(first_answer + i) %
						TF_S_ANSWER_QUEUE_CAPACITY];

			/* Read the answer from the L1_Buffer*/
			command_size = header.message_size +
				sizeof(struct tf_answer_header)/sizeof(u32);
			temp = (uint32_t *) &sComAnswer;
			for (i = 0; i < command_size; i++)
				temp[i] = comm->pBuffer->answer_queue[
					(first_answer + i) %
						TF_S_ANSWER_QUEUE_CAPACITY];

			/*
			 * operation_id carries the (kernel) address of the
			 * waiter's answer structure, stashed when the
			 * command was sent
			 */
			answerStructureTemp = (struct tf_answer_struct *)
				sComAnswer.header.operation_id;

			tf_dump_answer(&sComAnswer);

			memcpy(answerStructureTemp->answer, &sComAnswer,
				command_size * sizeof(u32));
			answerStructureTemp->answer_copied = true;

			first_answer += command_size;
			tf_write_reg32(&comm->pBuffer->first_answer,
				first_answer);
		}
		spin_unlock(&(comm->lock));
	}
}

/*
 * Copies a pending command into the L1 shared-memory command queue if
 * there is room, updating the connection state machine accordingly.
 * *command_status moves PENDING -> SENT on success, or -> ABORTED when
 * the connection can no longer accept commands.
 */
static void tf_copy_command(
	struct tf_comm *comm,
	union tf_command *command,
	struct tf_connection *connection,
	enum TF_COMMAND_STATE *command_status)
{
	if ((test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags)))
		&& (command != NULL)) {
		/*
		 * Write the message in the message queue.
		 */

		if (*command_status == TF_COMMAND_STATE_PENDING) {
			u32 command_size;
			u32 queue_words_count;
			u32 i;
			u32 first_free_command;
			u32 first_command;

			spin_lock(&comm->lock);

			first_command = tf_read_reg32(
				&comm->pBuffer->first_command);
			first_free_command = tf_read_reg32(
				&comm->pBuffer->first_free_command);

			queue_words_count = first_free_command - first_command;
			command_size = command->header.message_size +
				sizeof(struct tf_command_header)/sizeof(u32);
			if ((queue_words_count + command_size) <
					TF_N_MESSAGE_QUEUE_CAPACITY) {
				/*
				 * Command queue is not full.
				 * If the Command queue is full,
				 * the command will be copied at
				 * another iteration
				 * of the current function.
				 */

				/*
				 * Change the conn state
				 */
				if (connection == NULL)
					goto copy;

				spin_lock(&(connection->state_lock));

				if ((connection->state ==
					TF_CONN_STATE_NO_DEVICE_CONTEXT)
					&&
					(command->header.message_type ==
					TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT)) {

					dprintk(KERN_INFO
						"tf_copy_command(%p):"
						"Conn state is DEVICE_CONTEXT_SENT\n",
						connection);
					connection->state =
					TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT;
				} else if ((connection->state !=
					TF_CONN_STATE_VALID_DEVICE_CONTEXT)
					&&
					(command->header.message_type !=
					TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT)) {
					/* The connection
					 * is no longer valid.
					 * We may not send any command on it,
					 * not even another
					 * DESTROY_DEVICE_CONTEXT.
					 */
					dprintk(KERN_INFO
						"[pid=%d] tf_copy_command(%p): "
						"Connection no longer valid."
						"ABORT\n",
						current->pid, connection);
					*command_status =
						TF_COMMAND_STATE_ABORTED;
					spin_unlock(
						&(connection->state_lock));
					spin_unlock(
						&comm->lock);
					return;
				} else if (
					(command->header.message_type ==
					TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) &&
					(connection->state ==
					TF_CONN_STATE_VALID_DEVICE_CONTEXT)
					) {
					dprintk(KERN_INFO
						"[pid=%d] tf_copy_command(%p): "
						"Conn state is "
						"DESTROY_DEVICE_CONTEXT_SENT\n",
						current->pid, connection);
					connection->state =
					TF_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT;
				}
				spin_unlock(&(connection->state_lock));
copy:
				/*
				 * Copy the command to L1 Buffer
				 */
				dprintk(KERN_INFO
					"[pid=%d] tf_copy_command(%p): "
					"Write Message in the queue\n",
					current->pid, command);
				tf_dump_command(command);

				for (i = 0; i < command_size; i++)
					comm->pBuffer->command_queue[
						(first_free_command + i) %
						TF_N_MESSAGE_QUEUE_CAPACITY] =
						((uint32_t *) command)[i];

				*command_status =
					TF_COMMAND_STATE_SENT;
				first_free_command += command_size;

				tf_write_reg32(
					&comm->
					pBuffer->first_free_command,
					first_free_command);
			}
			spin_unlock(&comm->lock);
		}
	}
}

/*
 * Sends the
specified message through the specified communication channel. + * + * This function sends the command and waits for the answer + * + * Returns zero upon successful completion, or an appropriate error code upon + * failure. + */ +static int tf_send_recv(struct tf_comm *comm, + union tf_command *command, + struct tf_answer_struct *answerStruct, + struct tf_connection *connection, + int bKillable + #ifdef CONFIG_TF_ZEBRA + , bool *secure_is_idle + #endif + ) +{ + int result; + u64 timeout; + signed long nRelativeTimeoutJiffies; + bool wait_prepared = false; + enum TF_COMMAND_STATE command_status = TF_COMMAND_STATE_PENDING; + DEFINE_WAIT(wait); +#ifdef CONFIG_FREEZER + unsigned long saved_flags; +#endif + dprintk(KERN_INFO "[pid=%d] tf_send_recv(%p)\n", + current->pid, command); + +#ifdef CONFIG_FREEZER + saved_flags = current->flags; + current->flags |= PF_FREEZER_NOSIG; +#endif + + /* + * Read all answers from the answer queue + */ +copy_answers: + tf_copy_answers(comm); + + tf_copy_command(comm, command, connection, &command_status); + + /* + * Notify all waiting threads + */ + wake_up(&(comm->wait_queue)); + +#ifdef CONFIG_FREEZER + if (unlikely(freezing(current))) { + +#ifdef CONFIG_TF_ZEBRA + if (!(*secure_is_idle)) { + if (tf_schedule_secure_world(comm, true) == + STATUS_PENDING) + goto copy_answers; + + tf_l4sec_clkdm_allow_idle(true); + *secure_is_idle = true; + } +#endif + + dprintk(KERN_INFO + "Entering refrigerator.\n"); + refrigerator(); + dprintk(KERN_INFO + "Left refrigerator.\n"); + goto copy_answers; + } +#endif + +#ifndef CONFIG_PREEMPT + if (need_resched()) + schedule(); +#endif + +#ifdef CONFIG_TF_ZEBRA + /* + * Handle RPC (if any) + */ + if (tf_rpc_execute(comm) == RPC_NON_YIELD) + goto schedule_secure_world; +#endif + + /* + * Join wait queue + */ + /*dprintk(KERN_INFO "[pid=%d] tf_send_recv(%p): Prepare to wait\n", + current->pid, command);*/ + prepare_to_wait(&comm->wait_queue, &wait, + bKillable ? 
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); + wait_prepared = true; + + /* + * Check if our answer is available + */ + if (command_status == TF_COMMAND_STATE_ABORTED) { + /* Not waiting for an answer, return error code */ + result = -EINTR; + dprintk(KERN_ERR "[pid=%d] tf_send_recv: " + "Command status is ABORTED." + "Exit with 0x%x\n", + current->pid, result); + goto exit; + } + if (answerStruct->answer_copied) { + dprintk(KERN_INFO "[pid=%d] tf_send_recv: " + "Received answer (type 0x%02X)\n", + current->pid, + answerStruct->answer->header.message_type); + result = 0; + goto exit; + } + + /* + * Check if a signal is pending + */ + if (bKillable && (sigkill_pending())) { + if (command_status == TF_COMMAND_STATE_PENDING) + /*Command was not sent. */ + result = -EINTR; + else + /* Command was sent but no answer was received yet. */ + result = -EIO; + + dprintk(KERN_ERR "[pid=%d] tf_send_recv: " + "Signal Pending. Return error %d\n", + current->pid, result); + goto exit; + } + + /* + * Check if secure world is schedulable. It is schedulable if at + * least one of the following conditions holds: + * + it is still initializing (TF_COMM_FLAG_L1_SHARED_ALLOCATED + * is not set); + * + there is a command in the queue; + * + the secure world timeout is zero. 
+ */ + if (test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags))) { + u32 first_free_command; + u32 first_command; + spin_lock(&comm->lock); + first_command = tf_read_reg32( + &comm->pBuffer->first_command); + first_free_command = tf_read_reg32( + &comm->pBuffer->first_free_command); + spin_unlock(&comm->lock); + tf_read_timeout(comm, &timeout); + if ((first_free_command == first_command) && + (tf_test_s_timeout(timeout, + &nRelativeTimeoutJiffies) == 0)) + /* + * If command queue is empty and if timeout has not + * expired secure world is not schedulable + */ + goto wait; + } + + finish_wait(&comm->wait_queue, &wait); + wait_prepared = false; + + /* + * Yield to the Secure World + */ +#ifdef CONFIG_TF_ZEBRA +schedule_secure_world: + if (*secure_is_idle) { + tf_l4sec_clkdm_wakeup(true); + *secure_is_idle = false; + } +#endif + + result = tf_schedule_secure_world(comm, false); + if (result < 0) + goto exit; + goto copy_answers; + +wait: + if (bKillable && (sigkill_pending())) { + if (command_status == TF_COMMAND_STATE_PENDING) + result = -EINTR; /* Command was not sent. */ + else + /* Command was sent but no answer was received yet. */ + result = -EIO; + + dprintk(KERN_ERR "[pid=%d] tf_send_recv: " + "Signal Pending while waiting. 
Return error %d\n", + current->pid, result); + goto exit; + } + + if (nRelativeTimeoutJiffies == MAX_SCHEDULE_TIMEOUT) + dprintk(KERN_INFO "[pid=%d] tf_send_recv: " + "prepare to sleep infinitely\n", current->pid); + else + dprintk(KERN_INFO "tf_send_recv: " + "prepare to sleep 0x%lx jiffies\n", + nRelativeTimeoutJiffies); + +#ifdef CONFIG_TF_ZEBRA + if (!(*secure_is_idle)) { + if (tf_schedule_secure_world(comm, true) == STATUS_PENDING) { + finish_wait(&comm->wait_queue, &wait); + wait_prepared = false; + goto copy_answers; + } + tf_l4sec_clkdm_allow_idle(true); + *secure_is_idle = true; + } +#endif + + /* go to sleep */ + if (schedule_timeout(nRelativeTimeoutJiffies) == 0) + dprintk(KERN_INFO + "tf_send_recv: timeout expired\n"); + else + dprintk(KERN_INFO + "tf_send_recv: signal delivered\n"); + + finish_wait(&comm->wait_queue, &wait); + wait_prepared = false; + goto copy_answers; + +exit: + if (wait_prepared) { + finish_wait(&comm->wait_queue, &wait); + wait_prepared = false; + } + +#ifdef CONFIG_TF_ZEBRA + if ((!(*secure_is_idle)) && (result != -EIO)) { + if (tf_schedule_secure_world(comm, true) == STATUS_PENDING) + goto copy_answers; + + tf_l4sec_clkdm_allow_idle(true); + *secure_is_idle = true; + } +#endif + +#ifdef CONFIG_FREEZER + current->flags &= ~(PF_FREEZER_NOSIG); + current->flags |= (saved_flags & PF_FREEZER_NOSIG); +#endif + + return result; +} + +/* + * Sends the specified message through the specified communication channel. + * + * This function sends the message and waits for the corresponding answer + * It may return if a signal needs to be delivered. + * + * Returns zero upon successful completion, or an appropriate error code upon + * failure. 
+ */ +int tf_send_receive(struct tf_comm *comm, + union tf_command *command, + union tf_answer *answer, + struct tf_connection *connection, + bool bKillable) +{ + int error; + struct tf_answer_struct answerStructure; +#ifdef CONFIG_SMP + long ret_affinity; + cpumask_t saved_cpu_mask; + cpumask_t local_cpu_mask = CPU_MASK_NONE; +#endif +#ifdef CONFIG_TF_ZEBRA + bool secure_is_idle = true; +#endif + + answerStructure.answer = answer; + answerStructure.answer_copied = false; + + if (command != NULL) + command->header.operation_id = (u32) &answerStructure; + + dprintk(KERN_INFO "tf_send_receive\n"); + +#ifdef CONFIG_TF_ZEBRA + if (!test_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags)) { + dprintk(KERN_ERR "tf_send_receive(%p): " + "Secure world not started\n", comm); + + return -EFAULT; + } +#endif + + if (test_bit(TF_COMM_FLAG_TERMINATING, &(comm->flags)) != 0) { + dprintk(KERN_DEBUG + "tf_send_receive: Flag Terminating is set\n"); + return 0; + } + +#ifdef CONFIG_SMP + cpu_set(0, local_cpu_mask); + sched_getaffinity(0, &saved_cpu_mask); + ret_affinity = sched_setaffinity(0, &local_cpu_mask); + if (ret_affinity != 0) + dprintk(KERN_ERR "sched_setaffinity #1 -> 0x%lX", ret_affinity); +#endif + + + /* + * Send the command + */ + error = tf_send_recv(comm, + command, &answerStructure, connection, bKillable + #ifdef CONFIG_TF_ZEBRA + , &secure_is_idle + #endif + ); + + if (!bKillable && sigkill_pending()) { + if ((command->header.message_type == + TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT) && + (answer->create_device_context.error_code == + S_SUCCESS)) { + + /* + * CREATE_DEVICE_CONTEXT was interrupted. 
+ */ + dprintk(KERN_INFO "tf_send_receive: " + "sending DESTROY_DEVICE_CONTEXT\n"); + answerStructure.answer = answer; + answerStructure.answer_copied = false; + + command->header.message_type = + TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT; + command->header.message_size = + (sizeof(struct + tf_command_destroy_device_context) - + sizeof(struct tf_command_header))/sizeof(u32); + command->header.operation_id = + (u32) &answerStructure; + command->destroy_device_context.device_context = + answer->create_device_context. + device_context; + + goto destroy_context; + } + } + + if (error == 0) { + /* + * tf_send_recv returned Success. + */ + if (command->header.message_type == + TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT) { + spin_lock(&(connection->state_lock)); + connection->state = TF_CONN_STATE_VALID_DEVICE_CONTEXT; + spin_unlock(&(connection->state_lock)); + } else if (command->header.message_type == + TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) { + spin_lock(&(connection->state_lock)); + connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT; + spin_unlock(&(connection->state_lock)); + } + } else if (error == -EINTR) { + /* + * No command was sent, return failure. + */ + dprintk(KERN_ERR + "tf_send_receive: " + "tf_send_recv failed (error %d) !\n", + error); + } else if (error == -EIO) { + /* + * A command was sent but its answer is still pending. + */ + + /* means bKillable is true */ + dprintk(KERN_ERR + "tf_send_receive: " + "tf_send_recv interrupted (error %d)." + "Send DESTROY_DEVICE_CONTEXT.\n", error); + + /* Send the DESTROY_DEVICE_CONTEXT. 
*/ + answerStructure.answer = answer; + answerStructure.answer_copied = false; + + command->header.message_type = + TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT; + command->header.message_size = + (sizeof(struct tf_command_destroy_device_context) - + sizeof(struct tf_command_header))/sizeof(u32); + command->header.operation_id = + (u32) &answerStructure; + command->destroy_device_context.device_context = + connection->device_context; + + error = tf_send_recv(comm, + command, &answerStructure, connection, false + #ifdef CONFIG_TF_ZEBRA + , &secure_is_idle + #endif + ); + if (error == -EINTR) { + /* + * Another thread already sent + * DESTROY_DEVICE_CONTEXT. + * We must still wait for the answer + * to the original command. + */ + command = NULL; + goto destroy_context; + } else { + /* An answer was received. + * Check if it is the answer + * to the DESTROY_DEVICE_CONTEXT. + */ + spin_lock(&comm->lock); + if (answer->header.message_type != + TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) { + answerStructure.answer_copied = false; + } + spin_unlock(&comm->lock); + if (!answerStructure.answer_copied) { + /* Answer to DESTROY_DEVICE_CONTEXT + * was not yet received. + * Wait for the answer. 
+ */ + dprintk(KERN_INFO + "[pid=%d] tf_send_receive:" + "Answer to DESTROY_DEVICE_CONTEXT" + "not yet received.Retry\n", + current->pid); + command = NULL; + goto destroy_context; + } + } + } + + dprintk(KERN_INFO "tf_send_receive(): Message answer ready\n"); + goto exit; + +destroy_context: + error = tf_send_recv(comm, + command, &answerStructure, connection, false + #ifdef CONFIG_TF_ZEBRA + , &secure_is_idle + #endif + ); + + /* + * tf_send_recv cannot return an error because + * it's not killable and not within a connection + */ + BUG_ON(error != 0); + + /* Reset the state, so a new CREATE DEVICE CONTEXT can be sent */ + spin_lock(&(connection->state_lock)); + connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT; + spin_unlock(&(connection->state_lock)); + +exit: + +#ifdef CONFIG_SMP + ret_affinity = sched_setaffinity(0, &saved_cpu_mask); + if (ret_affinity != 0) + dprintk(KERN_ERR "sched_setaffinity #2 -> 0x%lX", ret_affinity); +#endif + return error; +} + +/*---------------------------------------------------------------------------- + * Power management + *----------------------------------------------------------------------------*/ + + +/* + * Handles all the power management calls. + * The operation is the type of power management + * operation to be performed. + * + * This routine will only return if a failure occured or if + * the required opwer management is of type "resume". 
+ * "Hibernate" and "Shutdown" should lock when doing the + * corresponding SMC to the Secure World + */ +int tf_power_management(struct tf_comm *comm, + enum TF_POWER_OPERATION operation) +{ + u32 status; + int error = 0; + + dprintk(KERN_INFO "tf_power_management(%d)\n", operation); + +#ifdef CONFIG_TF_ZEBRA + if (!test_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags)) { + dprintk(KERN_INFO "tf_power_management(%p): " + "succeeded (not started)\n", comm); + + return 0; + } +#endif + + status = ((tf_read_reg32(&(comm->pBuffer->status_s)) + & TF_STATUS_POWER_STATE_MASK) + >> TF_STATUS_POWER_STATE_SHIFT); + + switch (operation) { + case TF_POWER_OPERATION_SHUTDOWN: + switch (status) { + case TF_POWER_MODE_ACTIVE: + error = tf_pm_shutdown(comm); + + if (error) { + dprintk(KERN_ERR "tf_power_management(): " + "Failed with error code 0x%08x\n", + error); + goto error; + } + break; + + default: + goto not_allowed; + } + break; + + case TF_POWER_OPERATION_HIBERNATE: + switch (status) { + case TF_POWER_MODE_ACTIVE: + error = tf_pm_hibernate(comm); + + if (error) { + dprintk(KERN_ERR "tf_power_management(): " + "Failed with error code 0x%08x\n", + error); + goto error; + } + break; + + default: + goto not_allowed; + } + break; + + case TF_POWER_OPERATION_RESUME: + error = tf_pm_resume(comm); + + if (error != 0) { + dprintk(KERN_ERR "tf_power_management(): " + "Failed with error code 0x%08x\n", + error); + goto error; + } + break; + } + + dprintk(KERN_INFO "tf_power_management(): succeeded\n"); + return 0; + +not_allowed: + dprintk(KERN_ERR "tf_power_management(): " + "Power command not allowed in current " + "Secure World state %d\n", status); + error = -ENOTTY; +error: + return error; +} diff --git a/security/smc/tf_comm.h b/security/smc/tf_comm.h new file mode 100644 index 0000000..48bd934 --- /dev/null +++ b/security/smc/tf_comm.h @@ -0,0 +1,204 @@ +/** + * Copyright (c) 2011 Trusted Logic S.A. + * All Rights Reserved. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#ifndef __TF_COMM_H__ +#define __TF_COMM_H__ + +#include "tf_defs.h" +#include "tf_protocol.h" + +/*---------------------------------------------------------------------------- + * Misc + *----------------------------------------------------------------------------*/ + +void tf_set_current_time(struct tf_comm *comm); + +/* + * Atomic accesses to 32-bit variables in the L1 Shared buffer + */ +static inline u32 tf_read_reg32(const u32 *comm_buffer) +{ + u32 result; + + __asm__ __volatile__("@ tf_read_reg32\n" + "ldrex %0, [%1]\n" + : "=&r" (result) + : "r" (comm_buffer) + ); + + return result; +} + +static inline void tf_write_reg32(void *comm_buffer, u32 value) +{ + u32 tmp; + + __asm__ __volatile__("@ tf_write_reg32\n" + "1: ldrex %0, [%2]\n" + " strex %0, %1, [%2]\n" + " teq %0, #0\n" + " bne 1b" + : "=&r" (tmp) + : "r" (value), "r" (comm_buffer) + : "cc" + ); +} + +/* + * Atomic accesses to 64-bit variables in the L1 Shared buffer + */ +static inline u64 tf_read_reg64(void *comm_buffer) +{ + u64 result; + + __asm__ __volatile__("@ tf_read_reg64\n" + "ldrexd %0, [%1]\n" + : "=&r" (result) + : "r" (comm_buffer) + ); + + return result; +} + +static inline void tf_write_reg64(void *comm_buffer, u64 value) +{ + u64 tmp; + + __asm__ __volatile__("@ tf_write_reg64\n" + "1: ldrexd %0, [%2]\n" + " strexd %0, %1, 
[%2]\n" + " teq %0, #0\n" + " bne 1b" + : "=&r" (tmp) + : "r" (value), "r" (comm_buffer) + : "cc" + ); +} + +/*---------------------------------------------------------------------------- + * SMC operations + *----------------------------------------------------------------------------*/ + +/* RPC return values */ +#define RPC_NO 0x00 /* No RPC to execute */ +#define RPC_YIELD 0x01 /* Yield RPC */ +#define RPC_NON_YIELD 0x02 /* non-Yield RPC */ + +int tf_rpc_execute(struct tf_comm *comm); + +/*---------------------------------------------------------------------------- + * Shared memory related operations + *----------------------------------------------------------------------------*/ + +#define L1_DESCRIPTOR_FAULT (0x00000000) +#define L2_DESCRIPTOR_FAULT (0x00000000) + +#define L2_DESCRIPTOR_ADDR_MASK (0xFFFFF000) + +#define DESCRIPTOR_V13_12_MASK (0x3 << PAGE_SHIFT) +#define DESCRIPTOR_V13_12_GET(a) ((a & DESCRIPTOR_V13_12_MASK) >> PAGE_SHIFT) + +struct tf_coarse_page_table *tf_alloc_coarse_page_table( + struct tf_coarse_page_table_allocation_context *alloc_context, + u32 type); + +void tf_free_coarse_page_table( + struct tf_coarse_page_table_allocation_context *alloc_context, + struct tf_coarse_page_table *coarse_pg_table, + int force); + +void tf_init_coarse_page_table_allocator( + struct tf_coarse_page_table_allocation_context *alloc_context); + +void tf_release_coarse_page_table_allocator( + struct tf_coarse_page_table_allocation_context *alloc_context); + +struct page *tf_l2_page_descriptor_to_page(u32 l2_page_descriptor); + +u32 tf_get_l2_descriptor_common(u32 vaddr, struct mm_struct *mm); + +void tf_cleanup_shared_memory( + struct tf_coarse_page_table_allocation_context *alloc_context, + struct tf_shmem_desc *shmem_desc, + u32 full_cleanup); + +int tf_fill_descriptor_table( + struct tf_coarse_page_table_allocation_context *alloc_context, + struct tf_shmem_desc *shmem_desc, + u32 buffer, + struct vm_area_struct **vmas, + u32 
descriptors[TF_MAX_COARSE_PAGES], + u32 buffer_size, + u32 *buffer_start_offset, + bool in_user_space, + u32 flags, + u32 *descriptor_count); + +/*---------------------------------------------------------------------------- + * Standard communication operations + *----------------------------------------------------------------------------*/ + +#define STATUS_PENDING 0x00000001 + +int tf_schedule_secure_world(struct tf_comm *comm, bool prepare_exit); + +int tf_send_receive( + struct tf_comm *comm, + union tf_command *command, + union tf_answer *answer, + struct tf_connection *connection, + bool bKillable); + + +/** + * get a pointer to the secure world description. + * This points directly into the L1 shared buffer + * and is valid only once the communication has + * been initialized + **/ +u8 *tf_get_description(struct tf_comm *comm); + +/*---------------------------------------------------------------------------- + * Power management + *----------------------------------------------------------------------------*/ + +enum TF_POWER_OPERATION { + TF_POWER_OPERATION_HIBERNATE = 1, + TF_POWER_OPERATION_SHUTDOWN = 2, + TF_POWER_OPERATION_RESUME = 3, +}; + +int tf_pm_hibernate(struct tf_comm *comm); +int tf_pm_resume(struct tf_comm *comm); +int tf_pm_shutdown(struct tf_comm *comm); + +int tf_power_management(struct tf_comm *comm, + enum TF_POWER_OPERATION operation); + + +/*---------------------------------------------------------------------------- + * Communication initialization and termination + *----------------------------------------------------------------------------*/ + +int tf_init(struct tf_comm *comm); + +void tf_terminate(struct tf_comm *comm); + + +#endif /* __TF_COMM_H__ */ diff --git a/security/smc/tf_comm_mshield.c b/security/smc/tf_comm_mshield.c new file mode 100644 index 0000000..c36473e --- /dev/null +++ b/security/smc/tf_comm_mshield.c @@ -0,0 +1,1013 @@ +/** + * Copyright (c) 2010 Trusted Logic S.A. + * All Rights Reserved. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#include <asm/div64.h> +#include <asm/system.h> +#include <asm/cputype.h> +#include <linux/uaccess.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/page-flags.h> +#include <linux/pagemap.h> +#include <linux/vmalloc.h> +#include <linux/version.h> +#include <linux/jiffies.h> +#include <linux/dma-mapping.h> +#include <linux/cpu.h> + +#include <asm/cacheflush.h> + +#include "tf_defs.h" +#include "tf_comm.h" +#include "tf_util.h" +#include "tf_conn.h" +#include "tf_zebra.h" +#include "tf_crypto.h" + +/*-------------------------------------------------------------------------- + * Internal constants + *-------------------------------------------------------------------------- */ + +/* RPC commands */ +#define RPC_CMD_YIELD 0x00 +#define RPC_CMD_INIT 0x01 +#define RPC_CMD_TRACE 0x02 + +/* RPC return values to secure world */ +#define RPC_SUCCESS 0x00000000 +#define RPC_ERROR_BAD_PARAMETERS 0xFFFF0006 +#define RPC_ERROR_CONNECTION_PROTOCOL 0xFFFF3020 + +/* + * RPC call status + * + * 0: the secure world yielded due to an interrupt + * 1: the secure world yielded on an RPC (no public world thread is handling it) + * 2: the secure world yielded on an RPC and the response to that RPC is now in + * place + */ +#define RPC_ADVANCEMENT_NONE 0 +#define RPC_ADVANCEMENT_PENDING 1 +#define 
RPC_ADVANCEMENT_FINISHED 2 + +u32 g_RPC_advancement; +u32 g_RPC_parameters[4] = {0, 0, 0, 0}; +u32 g_secure_task_id; +u32 g_service_end; + +/* + * Secure ROMCode HAL API Identifiers + */ +#define API_HAL_SDP_RUNTIMEINIT_INDEX 0x04 +#define API_HAL_LM_PALOAD_INDEX 0x05 +#define API_HAL_LM_PAUNLOADALL_INDEX 0x07 +#define API_HAL_TASK_MGR_RPCINIT_INDEX 0x08 +#define API_HAL_KM_GETSECUREROMCODECRC_INDEX 0x0B +#define API_HAL_SEC_L3_RAM_RESIZE_INDEX 0x17 + +#define API_HAL_RET_VALUE_OK 0x0 + +/* SE entry flags */ +#define FLAG_START_HAL_CRITICAL 0x4 +#define FLAG_IRQFIQ_MASK 0x3 +#define FLAG_IRQ_ENABLE 0x2 +#define FLAG_FIQ_ENABLE 0x1 + +#define SMICODEPUB_IRQ_END 0xFE +#define SMICODEPUB_FIQ_END 0xFD +#define SMICODEPUB_RPC_END 0xFC + +#define SEC_RAM_SIZE_40KB 0x0000A000 +#define SEC_RAM_SIZE_48KB 0x0000C000 +#define SEC_RAM_SIZE_52KB 0x0000D000 +#define SEC_RAM_SIZE_60KB 0x0000F000 +#define SEC_RAM_SIZE_64KB 0x00010000 + +struct tf_ns_pa_info { + void *certificate; + void *parameters; + void *results; +}; + +/* + * AFY: I would like to remove the L0 buffer altogether: + * - you can use the L1 shared buffer to pass the RPC parameters and results: + * I think these easily fit in 256 bytes and you can use the area at + * offset 0x2C0-0x3BF in the L1 shared buffer + */ +struct tf_init_buffer { + u32 init_status; + u32 protocol_version; + u32 l1_shared_buffer_descr; + u32 backing_store_addr; + u32 backext_storage_addr; + u32 workspace_addr; + u32 workspace_size; + u32 properties_length; + u8 properties_buffer[1]; +}; + +#ifdef CONFIG_HAS_WAKELOCK +static struct wake_lock g_tf_wake_lock; +static u32 tf_wake_lock_count = 0; +#endif + +static struct clockdomain *smc_l4_sec_clkdm; +static u32 smc_l4_sec_clkdm_use_count = 0; + +static int __init tf_early_init(void) +{ + g_secure_task_id = 0; + + dprintk(KERN_INFO "SMC early init\n"); + + smc_l4_sec_clkdm = clkdm_lookup("l4_secure_clkdm"); + if (smc_l4_sec_clkdm == NULL) + return -EFAULT; + +#ifdef CONFIG_HAS_WAKELOCK + 
wake_lock_init(&g_tf_wake_lock, WAKE_LOCK_SUSPEND, + TF_DEVICE_BASE_NAME); +#endif + + return 0; +} +early_initcall(tf_early_init); + +/* + * Function responsible for formatting parameters to pass from NS world to + * S world + */ +u32 omap4_secure_dispatcher(u32 app_id, u32 flags, u32 nargs, + u32 arg1, u32 arg2, u32 arg3, u32 arg4) +{ + u32 ret; + unsigned long iflags; + u32 pub2sec_args[5] = {0, 0, 0, 0, 0}; + + /*dprintk(KERN_INFO "omap4_secure_dispatcher: " + "app_id=0x%08x, flags=0x%08x, nargs=%u\n", + app_id, flags, nargs);*/ + + /*if (nargs != 0) + dprintk(KERN_INFO + "omap4_secure_dispatcher: args=%08x, %08x, %08x, %08x\n", + arg1, arg2, arg3, arg4);*/ + + pub2sec_args[0] = nargs; + pub2sec_args[1] = arg1; + pub2sec_args[2] = arg2; + pub2sec_args[3] = arg3; + pub2sec_args[4] = arg4; + + /* Make sure parameters are visible to the secure world */ + dmac_flush_range((void *)pub2sec_args, + (void *)(((u32)(pub2sec_args)) + 5*sizeof(u32))); + outer_clean_range(__pa(pub2sec_args), + __pa(pub2sec_args) + 5*sizeof(u32)); + wmb(); + + /* + * Put L4 Secure clock domain to SW_WKUP so that modules are accessible + */ + tf_l4sec_clkdm_wakeup(false); + + local_irq_save(iflags); +#ifdef DEBUG + BUG_ON((read_mpidr() & 0x00000003) != 0); +#endif + /* proc_id is always 0 */ + ret = schedule_secure_world(app_id, 0, flags, __pa(pub2sec_args)); + local_irq_restore(iflags); + + /* Restore the HW_SUP on L4 Sec clock domain so hardware can idle */ + tf_l4sec_clkdm_allow_idle(false); + + /*dprintk(KERN_INFO "omap4_secure_dispatcher()\n");*/ + + return ret; +} + +/* Yields the Secure World */ +int tf_schedule_secure_world(struct tf_comm *comm, bool prepare_exit) +{ + int status = 0; + int ret; + unsigned long iflags; + u32 appli_id; + + tf_set_current_time(comm); + + local_irq_save(iflags); + + switch (g_RPC_advancement) { + case RPC_ADVANCEMENT_NONE: + /* Return from IRQ */ + appli_id = SMICODEPUB_IRQ_END; + if (prepare_exit) + status = STATUS_PENDING; + break; + case 
RPC_ADVANCEMENT_PENDING: + /* nothing to do in this case */ + goto exit; + default: + case RPC_ADVANCEMENT_FINISHED: + if (prepare_exit) + goto exit; + appli_id = SMICODEPUB_RPC_END; + g_RPC_advancement = RPC_ADVANCEMENT_NONE; + break; + } + + g_service_end = 1; + /* yield to the Secure World */ + ret = omap4_secure_dispatcher(appli_id, /* app_id */ + 0, 0, /* flags, nargs */ + 0, 0, 0, 0); /* arg1, arg2, arg3, arg4 */ + if (g_service_end != 0) { + dprintk(KERN_ERR "Service End ret=%X\n", ret); + + if (ret == 0) { + dmac_flush_range((void *)comm->init_shared_buffer, + (void *)(((u32)(comm->init_shared_buffer)) + + PAGE_SIZE)); + outer_inv_range(__pa(comm->init_shared_buffer), + __pa(comm->init_shared_buffer) + + PAGE_SIZE); + + ret = ((struct tf_init_buffer *) + (comm->init_shared_buffer))->init_status; + + dprintk(KERN_ERR "SMC PA failure ret=%X\n", ret); + if (ret == 0) + ret = -EFAULT; + } + clear_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags); + omap4_secure_dispatcher(API_HAL_LM_PAUNLOADALL_INDEX, + FLAG_START_HAL_CRITICAL, 0, 0, 0, 0, 0); + status = ret; + } + +exit: + local_irq_restore(iflags); + + return status; +} + +/* Initializes the SE (SDP, SRAM resize, RPC handler) */ +static int tf_se_init(struct tf_comm *comm, + u32 sdp_backing_store_addr, u32 sdp_bkext_store_addr) +{ + int error; + unsigned int crc; + + if (comm->se_initialized) { + dprintk(KERN_INFO "tf_se_init: SE already initialized... " + "nothing to do\n"); + return 0; + } + + /* Secure CRC read */ + dprintk(KERN_INFO "tf_se_init: Secure CRC Read...\n"); + + crc = omap4_secure_dispatcher(API_HAL_KM_GETSECUREROMCODECRC_INDEX, + 0, 0, 0, 0, 0, 0); + printk(KERN_INFO "SMC: SecureCRC=0x%08X\n", crc); + + /* + * Flush caches before resize, just to be sure there is no + * pending public data writes back to SRAM that could trigger a + * security violation once their address space is marked as + * secure. 
+ */ +#define OMAP4_SRAM_PA 0x40300000 +#define OMAP4_SRAM_SIZE 0xe000 + flush_cache_all(); + outer_flush_range(OMAP4_SRAM_PA, + OMAP4_SRAM_PA + OMAP4_SRAM_SIZE); + wmb(); + + /* SRAM resize */ + dprintk(KERN_INFO "tf_se_init: SRAM resize (52KB)...\n"); + error = omap4_secure_dispatcher(API_HAL_SEC_L3_RAM_RESIZE_INDEX, + FLAG_FIQ_ENABLE | FLAG_START_HAL_CRITICAL, 1, + SEC_RAM_SIZE_52KB, 0, 0, 0); + + if (error == API_HAL_RET_VALUE_OK) { + dprintk(KERN_INFO "tf_se_init: SRAM resize OK\n"); + } else { + dprintk(KERN_ERR "tf_se_init: " + "SRAM resize failed [0x%x]\n", error); + goto error; + } + + /* SDP init */ + dprintk(KERN_INFO "tf_se_init: SDP runtime init..." + "(sdp_backing_store_addr=%x, sdp_bkext_store_addr=%x)\n", + sdp_backing_store_addr, sdp_bkext_store_addr); + error = omap4_secure_dispatcher(API_HAL_SDP_RUNTIMEINIT_INDEX, + FLAG_FIQ_ENABLE | FLAG_START_HAL_CRITICAL, 2, + sdp_backing_store_addr, sdp_bkext_store_addr, 0, 0); + + if (error == API_HAL_RET_VALUE_OK) { + dprintk(KERN_INFO "tf_se_init: SDP runtime init OK\n"); + } else { + dprintk(KERN_ERR "tf_se_init: " + "SDP runtime init failed [0x%x]\n", error); + goto error; + } + + /* RPC init */ + dprintk(KERN_INFO "tf_se_init: RPC init...\n"); + error = omap4_secure_dispatcher(API_HAL_TASK_MGR_RPCINIT_INDEX, + FLAG_START_HAL_CRITICAL, 1, + (u32) (u32(*const) (u32, u32, u32, u32)) &rpc_handler, 0, 0, 0); + + if (error == API_HAL_RET_VALUE_OK) { + dprintk(KERN_INFO "tf_se_init: RPC init OK\n"); + } else { + dprintk(KERN_ERR "tf_se_init: " + "RPC init failed [0x%x]\n", error); + goto error; + } + + comm->se_initialized = true; + + return 0; + +error: + return -EFAULT; +} + +/* Check protocol version returned by the PA */ +static u32 tf_rpc_init(struct tf_comm *comm) +{ + u32 protocol_version; + u32 rpc_error = RPC_SUCCESS; + + dprintk(KERN_INFO "tf_rpc_init(%p)\n", comm); + + spin_lock(&(comm->lock)); + + dmac_flush_range((void *)comm->init_shared_buffer, + (void *)(((u32)(comm->init_shared_buffer)) + 
PAGE_SIZE)); + outer_inv_range(__pa(comm->init_shared_buffer), + __pa(comm->init_shared_buffer) + PAGE_SIZE); + + protocol_version = ((struct tf_init_buffer *) + (comm->init_shared_buffer))->protocol_version; + + if ((GET_PROTOCOL_MAJOR_VERSION(protocol_version)) + != TF_S_PROTOCOL_MAJOR_VERSION) { + dprintk(KERN_ERR "SMC: Unsupported SMC Protocol PA Major " + "Version (0x%02x, expected 0x%02x)!\n", + GET_PROTOCOL_MAJOR_VERSION(protocol_version), + TF_S_PROTOCOL_MAJOR_VERSION); + rpc_error = RPC_ERROR_CONNECTION_PROTOCOL; + } else { + rpc_error = RPC_SUCCESS; + } + + spin_unlock(&(comm->lock)); + + register_smc_public_crypto_digest(); + register_smc_public_crypto_aes(); + + return rpc_error; +} + +static u32 tf_rpc_trace(struct tf_comm *comm) +{ + dprintk(KERN_INFO "tf_rpc_trace(%p)\n", comm); + +#ifdef CONFIG_SECURE_TRACE + spin_lock(&(comm->lock)); + printk(KERN_INFO "SMC PA: %s", + comm->pBuffer->rpc_trace_buffer); + spin_unlock(&(comm->lock)); +#endif + return RPC_SUCCESS; +} + +/* + * Handles RPC calls + * + * Returns: + * - RPC_NO if there was no RPC to execute + * - RPC_YIELD if there was a Yield RPC + * - RPC_NON_YIELD if there was a non-Yield RPC + */ + +int tf_rpc_execute(struct tf_comm *comm) +{ + u32 rpc_command; + u32 rpc_error = RPC_NO; + +#ifdef DEBUG + BUG_ON((read_mpidr() & 0x00000003) != 0); +#endif + + /* Lock the RPC */ + mutex_lock(&(comm->rpc_mutex)); + + rpc_command = g_RPC_parameters[1]; + + if (g_RPC_advancement == RPC_ADVANCEMENT_PENDING) { + dprintk(KERN_INFO "tf_rpc_execute: " + "Executing CMD=0x%x\n", + g_RPC_parameters[1]); + + switch (rpc_command) { + case RPC_CMD_YIELD: + dprintk(KERN_INFO "tf_rpc_execute: " + "RPC_CMD_YIELD\n"); + + rpc_error = RPC_YIELD; + g_RPC_parameters[0] = RPC_SUCCESS; + break; + + case RPC_CMD_TRACE: + rpc_error = RPC_NON_YIELD; + g_RPC_parameters[0] = tf_rpc_trace(comm); + break; + + default: + if (tf_crypto_execute_rpc(rpc_command, + comm->pBuffer->rpc_cus_buffer) != 0) + g_RPC_parameters[0] = 
RPC_ERROR_BAD_PARAMETERS; + else + g_RPC_parameters[0] = RPC_SUCCESS; + rpc_error = RPC_NON_YIELD; + break; + } + g_RPC_advancement = RPC_ADVANCEMENT_FINISHED; + } + + mutex_unlock(&(comm->rpc_mutex)); + + dprintk(KERN_INFO "tf_rpc_execute: Return 0x%x\n", + rpc_error); + + return rpc_error; +} + +/*-------------------------------------------------------------------------- + * L4 SEC Clock domain handling + *-------------------------------------------------------------------------- */ + +static DEFINE_SPINLOCK(clk_lock); +void tf_l4sec_clkdm_wakeup(bool wakelock) +{ + unsigned long flags; + spin_lock_irqsave(&clk_lock, flags); +#ifdef CONFIG_HAS_WAKELOCK + if (wakelock) { + tf_wake_lock_count++; + wake_lock(&g_tf_wake_lock); + } +#endif + smc_l4_sec_clkdm_use_count++; + clkdm_wakeup(smc_l4_sec_clkdm); + spin_unlock_irqrestore(&clk_lock, flags); +} + +void tf_l4sec_clkdm_allow_idle(bool wakeunlock) +{ + unsigned long flags; + spin_lock_irqsave(&clk_lock, flags); + smc_l4_sec_clkdm_use_count--; + if (smc_l4_sec_clkdm_use_count == 0) + clkdm_allow_idle(smc_l4_sec_clkdm); +#ifdef CONFIG_HAS_WAKELOCK + if (wakeunlock){ + tf_wake_lock_count--; + if (tf_wake_lock_count == 0) + wake_unlock(&g_tf_wake_lock); + } +#endif + spin_unlock_irqrestore(&clk_lock, flags); +} + +/*-------------------------------------------------------------------------- + * Power management + *-------------------------------------------------------------------------- */ + /* + * Perform a Secure World shutdown operation. + * The routine does not return if the operation succeeds. + * the routine returns an appropriate error code if + * the operation fails. 
+ */ +int tf_pm_shutdown(struct tf_comm *comm) +{ + + int error; + union tf_command command; + union tf_answer answer; + + dprintk(KERN_INFO "tf_pm_shutdown()\n"); + + memset(&command, 0, sizeof(command)); + + command.header.message_type = TF_MESSAGE_TYPE_MANAGEMENT; + command.header.message_size = + (sizeof(struct tf_command_management) - + sizeof(struct tf_command_header))/sizeof(u32); + + command.management.command = TF_MANAGEMENT_SHUTDOWN; + + error = tf_send_receive( + comm, + &command, + &answer, + NULL, + false); + + if (error != 0) { + dprintk(KERN_ERR "tf_pm_shutdown(): " + "tf_send_receive failed (error %d)!\n", + error); + return error; + } + +#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT + if (answer.header.error_code != 0) + dprintk(KERN_ERR "tf_driver: shutdown failed.\n"); + else + dprintk(KERN_INFO "tf_driver: shutdown succeeded.\n"); +#endif + + return answer.header.error_code; +} + + +int tf_pm_hibernate(struct tf_comm *comm) +{ + struct tf_device *dev = tf_get_device(); + + dprintk(KERN_INFO "tf_pm_hibernate()\n"); + + /* + * As we enter in CORE OFF, the keys are going to be cleared. + * Reset the global key context. + * When the system leaves CORE OFF, this will force the driver to go + * through the secure world which will reconfigure the accelerators. 
+ */ + dev->aes1_key_context = 0; + dev->des_key_context = 0; +#ifndef CONFIG_SMC_KERNEL_CRYPTO + dev->sham1_is_public = false; +#endif + return 0; +} + +#ifdef CONFIG_SMC_KERNEL_CRYPTO +#define DELAYED_RESUME_NONE 0 +#define DELAYED_RESUME_PENDING 1 +#define DELAYED_RESUME_ONGOING 2 + +static DEFINE_SPINLOCK(tf_delayed_resume_lock); +static int tf_need_delayed_resume = DELAYED_RESUME_NONE; + +int tf_delayed_secure_resume(void) +{ + int ret; + union tf_command message; + union tf_answer answer; + struct tf_device *dev = tf_get_device(); + + spin_lock(&tf_delayed_resume_lock); + if (likely(tf_need_delayed_resume == DELAYED_RESUME_NONE)) { + spin_unlock(&tf_delayed_resume_lock); + return 0; + } + + if (unlikely(tf_need_delayed_resume == DELAYED_RESUME_ONGOING)) { + spin_unlock(&tf_delayed_resume_lock); + + /* + * Wait for the other caller to actually finish the delayed + * resume operation + */ + while (tf_need_delayed_resume != DELAYED_RESUME_NONE) + cpu_relax(); + + return 0; + } + + tf_need_delayed_resume = DELAYED_RESUME_ONGOING; + spin_unlock(&tf_delayed_resume_lock); + + /* + * When the system leaves CORE OFF, HWA are configured as secure. We + * need them as public for the Linux Crypto API. 
+ */ + memset(&message, 0, sizeof(message)); + + message.header.message_type = TF_MESSAGE_TYPE_MANAGEMENT; + message.header.message_size = + (sizeof(struct tf_command_management) - + sizeof(struct tf_command_header))/sizeof(u32); + message.management.command = + TF_MANAGEMENT_RESUME_FROM_CORE_OFF; + + ret = tf_send_receive(&dev->sm, &message, &answer, NULL, false); + if (ret) { + printk(KERN_ERR "tf_pm_resume(%p): " + "tf_send_receive failed (error %d)!\n", + &dev->sm, ret); + + unregister_smc_public_crypto_digest(); + unregister_smc_public_crypto_aes(); + return ret; + } + + if (answer.header.error_code) { + unregister_smc_public_crypto_digest(); + unregister_smc_public_crypto_aes(); + } + + spin_lock(&tf_delayed_resume_lock); + tf_need_delayed_resume = DELAYED_RESUME_NONE; + spin_unlock(&tf_delayed_resume_lock); + + return answer.header.error_code; +} +#endif + +int tf_pm_resume(struct tf_comm *comm) +{ + + dprintk(KERN_INFO "tf_pm_resume()\n"); + #if 0 + { + void *workspace_va; + struct tf_device *dev = tf_get_device(); + workspace_va = ioremap(dev->workspace_addr, + dev->workspace_size); + printk(KERN_INFO + "Read first word of workspace [0x%x]\n", + *(uint32_t *)workspace_va); + } + #endif + +#ifdef CONFIG_SMC_KERNEL_CRYPTO + spin_lock(&tf_delayed_resume_lock); + tf_need_delayed_resume = DELAYED_RESUME_PENDING; + spin_unlock(&tf_delayed_resume_lock); +#endif + return 0; +} + +/*-------------------------------------------------------------------------- + * Initialization + *-------------------------------------------------------------------------- */ + +int tf_init(struct tf_comm *comm) +{ + spin_lock_init(&(comm->lock)); + comm->flags = 0; + comm->pBuffer = NULL; + comm->init_shared_buffer = NULL; + + comm->se_initialized = false; + + init_waitqueue_head(&(comm->wait_queue)); + mutex_init(&(comm->rpc_mutex)); + + if (tf_crypto_init() != PUBLIC_CRYPTO_OPERATION_SUCCESS) + return -EFAULT; + + if (omap_type() == OMAP2_DEVICE_TYPE_GP) { + 
register_smc_public_crypto_digest(); + register_smc_public_crypto_aes(); + } + + return 0; +} + +/* Start the SMC PA */ +int tf_start(struct tf_comm *comm, + u32 workspace_addr, u32 workspace_size, + u8 *pa_buffer, u32 pa_size, + u8 *properties_buffer, u32 properties_length) +{ + struct tf_init_buffer *init_shared_buffer = NULL; + struct tf_l1_shared_buffer *l1_shared_buffer = NULL; + u32 l1_shared_buffer_descr; + struct tf_ns_pa_info pa_info; + int ret; + u32 descr; + u32 sdp_backing_store_addr; + u32 sdp_bkext_store_addr; +#ifdef CONFIG_SMP + long ret_affinity; + cpumask_t saved_cpu_mask; + cpumask_t local_cpu_mask = CPU_MASK_NONE; + + /* OMAP4 Secure ROM Code can only be called from CPU0. */ + cpu_set(0, local_cpu_mask); + sched_getaffinity(0, &saved_cpu_mask); + ret_affinity = sched_setaffinity(0, &local_cpu_mask); + if (ret_affinity != 0) + dprintk(KERN_ERR "sched_setaffinity #1 -> 0x%lX", ret_affinity); +#endif + + tf_l4sec_clkdm_wakeup(true); + + workspace_size -= SZ_1M; + sdp_backing_store_addr = workspace_addr + workspace_size; + workspace_size -= 0x20000; + sdp_bkext_store_addr = workspace_addr + workspace_size; + + /* + * Implementation notes: + * + * 1/ The PA buffer (pa_buffer)is now owned by this function. + * In case of error, it is responsible for releasing the buffer. + * + * 2/ The PA Info and PA Buffer will be freed through a RPC call + * at the beginning of the PA entry in the SE. 
+ */ + + if (test_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags)) { + dprintk(KERN_ERR "tf_start(%p): " + "The SMC PA is already started\n", comm); + + ret = -EFAULT; + goto error1; + } + + if (sizeof(struct tf_l1_shared_buffer) != PAGE_SIZE) { + dprintk(KERN_ERR "tf_start(%p): " + "The L1 structure size is incorrect!\n", comm); + ret = -EFAULT; + goto error1; + } + + ret = tf_se_init(comm, sdp_backing_store_addr, + sdp_bkext_store_addr); + if (ret != 0) { + dprintk(KERN_ERR "tf_start(%p): " + "SE initialization failed\n", comm); + goto error1; + } + + init_shared_buffer = + (struct tf_init_buffer *) + internal_get_zeroed_page(GFP_KERNEL); + if (init_shared_buffer == NULL) { + dprintk(KERN_ERR "tf_start(%p): " + "Ouf of memory!\n", comm); + + ret = -ENOMEM; + goto error1; + } + /* Ensure the page is mapped */ + __set_page_locked(virt_to_page(init_shared_buffer)); + + l1_shared_buffer = + (struct tf_l1_shared_buffer *) + internal_get_zeroed_page(GFP_KERNEL); + + if (l1_shared_buffer == NULL) { + dprintk(KERN_ERR "tf_start(%p): " + "Ouf of memory!\n", comm); + + ret = -ENOMEM; + goto error1; + } + /* Ensure the page is mapped */ + __set_page_locked(virt_to_page(l1_shared_buffer)); + + dprintk(KERN_INFO "tf_start(%p): " + "L0SharedBuffer={0x%08x, 0x%08x}\n", comm, + (u32) init_shared_buffer, (u32) __pa(init_shared_buffer)); + dprintk(KERN_INFO "tf_start(%p): " + "L1SharedBuffer={0x%08x, 0x%08x}\n", comm, + (u32) l1_shared_buffer, (u32) __pa(l1_shared_buffer)); + + descr = tf_get_l2_descriptor_common((u32) l1_shared_buffer, + current->mm); + l1_shared_buffer_descr = ( + ((u32) __pa(l1_shared_buffer) & 0xFFFFF000) | + (descr & 0xFFF)); + + pa_info.certificate = (void *) __pa(pa_buffer); + pa_info.parameters = (void *) __pa(init_shared_buffer); + pa_info.results = (void *) __pa(init_shared_buffer); + + init_shared_buffer->l1_shared_buffer_descr = l1_shared_buffer_descr; + + init_shared_buffer->backing_store_addr = sdp_backing_store_addr; + 
init_shared_buffer->backext_storage_addr = sdp_bkext_store_addr; + init_shared_buffer->workspace_addr = workspace_addr; + init_shared_buffer->workspace_size = workspace_size; + + init_shared_buffer->properties_length = properties_length; + if (properties_length == 0) { + init_shared_buffer->properties_buffer[0] = 0; + } else { + /* Test for overflow */ + if ((init_shared_buffer->properties_buffer + + properties_length + > init_shared_buffer->properties_buffer) && + (properties_length <= + init_shared_buffer->properties_length)) { + memcpy(init_shared_buffer->properties_buffer, + properties_buffer, + properties_length); + } else { + dprintk(KERN_INFO "tf_start(%p): " + "Configuration buffer size from userland is " + "incorrect(%d, %d)\n", + comm, (u32) properties_length, + init_shared_buffer->properties_length); + ret = -EFAULT; + goto error1; + } + } + + dprintk(KERN_INFO "tf_start(%p): " + "System Configuration (%d bytes)\n", comm, + init_shared_buffer->properties_length); + dprintk(KERN_INFO "tf_start(%p): " + "Starting PA (%d bytes)...\n", comm, pa_size); + + /* + * Make sure all data is visible to the secure world + */ + dmac_flush_range((void *)init_shared_buffer, + (void *)(((u32)init_shared_buffer) + PAGE_SIZE)); + outer_clean_range(__pa(init_shared_buffer), + __pa(init_shared_buffer) + PAGE_SIZE); + + dmac_flush_range((void *)pa_buffer, + (void *)(pa_buffer + pa_size)); + outer_clean_range(__pa(pa_buffer), + __pa(pa_buffer) + pa_size); + + dmac_flush_range((void *)&pa_info, + (void *)(((u32)&pa_info) + sizeof(struct tf_ns_pa_info))); + outer_clean_range(__pa(&pa_info), + __pa(&pa_info) + sizeof(struct tf_ns_pa_info)); + wmb(); + + spin_lock(&(comm->lock)); + comm->init_shared_buffer = init_shared_buffer; + comm->pBuffer = l1_shared_buffer; + spin_unlock(&(comm->lock)); + init_shared_buffer = NULL; + l1_shared_buffer = NULL; + + /* + * Set the OS current time in the L1 shared buffer first. The secure + * world uses it as itw boot reference time. 
+ */ + tf_set_current_time(comm); + + /* Workaround for issue #6081 */ + if ((omap_rev() && 0xFFF000FF) == OMAP443X_CLASS) + disable_nonboot_cpus(); + + /* + * Start the SMC PA + */ + ret = omap4_secure_dispatcher(API_HAL_LM_PALOAD_INDEX, + FLAG_IRQ_ENABLE | FLAG_FIQ_ENABLE | FLAG_START_HAL_CRITICAL, 1, + __pa(&pa_info), 0, 0, 0); + if (ret != API_HAL_RET_VALUE_OK) { + printk(KERN_ERR "SMC: Error while loading the PA [0x%x]\n", + ret); + goto error2; + } + + /* Loop until the first S Yield RPC is received */ +loop: + mutex_lock(&(comm->rpc_mutex)); + + if (g_RPC_advancement == RPC_ADVANCEMENT_PENDING) { + dprintk(KERN_INFO "tf_rpc_execute: " + "Executing CMD=0x%x\n", + g_RPC_parameters[1]); + + switch (g_RPC_parameters[1]) { + case RPC_CMD_YIELD: + dprintk(KERN_INFO "tf_rpc_execute: " + "RPC_CMD_YIELD\n"); + set_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, + &(comm->flags)); + g_RPC_parameters[0] = RPC_SUCCESS; + break; + + case RPC_CMD_INIT: + dprintk(KERN_INFO "tf_rpc_execute: " + "RPC_CMD_INIT\n"); + g_RPC_parameters[0] = tf_rpc_init(comm); + break; + + case RPC_CMD_TRACE: + g_RPC_parameters[0] = tf_rpc_trace(comm); + break; + + default: + g_RPC_parameters[0] = RPC_ERROR_BAD_PARAMETERS; + break; + } + g_RPC_advancement = RPC_ADVANCEMENT_FINISHED; + } + + mutex_unlock(&(comm->rpc_mutex)); + + ret = tf_schedule_secure_world(comm, false); + if (ret != 0) { + printk(KERN_ERR "SMC: Error while loading the PA [0x%x]\n", + ret); + goto error2; + } + + if (!test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags))) + goto loop; + + set_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags); + wake_up(&(comm->wait_queue)); + ret = 0; + + #if 0 + { + void *workspace_va; + workspace_va = ioremap(workspace_addr, workspace_size); + printk(KERN_INFO + "Read first word of workspace [0x%x]\n", + *(uint32_t *)workspace_va); + } + #endif + + /* Workaround for issue #6081 */ + if ((omap_rev() && 0xFFF000FF) == OMAP443X_CLASS) + enable_nonboot_cpus(); + + goto exit; + +error2: + /* Workaround for 
issue #6081 */ + if ((omap_rev() && 0xFFF000FF) == OMAP443X_CLASS) + enable_nonboot_cpus(); + + spin_lock(&(comm->lock)); + l1_shared_buffer = comm->pBuffer; + init_shared_buffer = comm->init_shared_buffer; + comm->pBuffer = NULL; + comm->init_shared_buffer = NULL; + spin_unlock(&(comm->lock)); + +error1: + if (init_shared_buffer != NULL) { + __clear_page_locked(virt_to_page(init_shared_buffer)); + internal_free_page((unsigned long) init_shared_buffer); + } + if (l1_shared_buffer != NULL) { + __clear_page_locked(virt_to_page(l1_shared_buffer)); + internal_free_page((unsigned long) l1_shared_buffer); + } + +exit: +#ifdef CONFIG_SMP + ret_affinity = sched_setaffinity(0, &saved_cpu_mask); + if (ret_affinity != 0) + dprintk(KERN_ERR "sched_setaffinity #2 -> 0x%lX", ret_affinity); +#endif + + tf_l4sec_clkdm_allow_idle(true); + + if (ret > 0) + ret = -EFAULT; + + return ret; +} + +void tf_terminate(struct tf_comm *comm) +{ + dprintk(KERN_INFO "tf_terminate(%p)\n", comm); + + spin_lock(&(comm->lock)); + + tf_crypto_terminate(); + + spin_unlock(&(comm->lock)); +} diff --git a/security/smc/tf_conn.c b/security/smc/tf_conn.c new file mode 100644 index 0000000..4ab7a0a --- /dev/null +++ b/security/smc/tf_conn.c @@ -0,0 +1,1647 @@ +/** + * Copyright (c) 2011 Trusted Logic S.A. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#include <asm/atomic.h> +#include <linux/uaccess.h> +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/list.h> +#include <linux/mm.h> +#include <linux/pagemap.h> +#include <linux/types.h> + +#include "s_version.h" + +#include "tf_protocol.h" +#include "tf_defs.h" +#include "tf_util.h" +#include "tf_comm.h" +#include "tf_conn.h" + +#ifdef CONFIG_TF_ZEBRA +#include "tf_crypto.h" +#endif + +/*---------------------------------------------------------------------------- + * Management of the shared memory blocks. + * + * Shared memory blocks are the blocks registered through + * the commands REGISTER_SHARED_MEMORY and POWER_MANAGEMENT + *----------------------------------------------------------------------------*/ + +/** + * Unmaps a shared memory + **/ +static void tf_unmap_shmem( + struct tf_connection *connection, + struct tf_shmem_desc *shmem_desc, + u32 full_cleanup) +{ + /* check shmem_desc contains a descriptor */ + if (shmem_desc == NULL) + return; + + dprintk(KERN_DEBUG "tf_unmap_shmem(%p)\n", shmem_desc); + +retry: + mutex_lock(&(connection->shmem_mutex)); + if (atomic_read(&shmem_desc->ref_count) > 1) { + /* + * Shared mem still in use, wait for other operations completion + * before actually unmapping it. 
+ */ + dprintk(KERN_INFO "Descriptor in use\n"); + mutex_unlock(&(connection->shmem_mutex)); + schedule(); + goto retry; + } + + tf_cleanup_shared_memory( + &(connection->cpt_alloc_context), + shmem_desc, + full_cleanup); + + list_del(&(shmem_desc->list)); + + if ((shmem_desc->type == TF_SHMEM_TYPE_REGISTERED_SHMEM) || + (full_cleanup != 0)) { + internal_kfree(shmem_desc); + + atomic_dec(&(connection->shmem_count)); + } else { + /* + * This is a preallocated shared memory, add to free list + * Since the device context is unmapped last, it is + * always the first element of the free list if no + * device context has been created + */ + shmem_desc->block_identifier = 0; + list_add(&(shmem_desc->list), &(connection->free_shmem_list)); + } + + mutex_unlock(&(connection->shmem_mutex)); +} + + +/** + * Find the first available slot for a new block of shared memory + * and map the user buffer. + * Update the descriptors to L1 descriptors + * Update the buffer_start_offset and buffer_size fields + * shmem_desc is updated to the mapped shared memory descriptor + **/ +static int tf_map_shmem( + struct tf_connection *connection, + u32 buffer, + /* flags for read-write access rights on the memory */ + u32 flags, + bool in_user_space, + u32 descriptors[TF_MAX_COARSE_PAGES], + u32 *buffer_start_offset, + u32 buffer_size, + struct tf_shmem_desc **shmem_desc, + u32 *descriptor_count) +{ + struct tf_shmem_desc *desc = NULL; + int error; + + dprintk(KERN_INFO "tf_map_shmem(%p, %p, flags = 0x%08x)\n", + connection, + (void *) buffer, + flags); + + mutex_lock(&(connection->shmem_mutex)); + + /* + * Check the list of free shared memory + * is not empty + */ + if (list_empty(&(connection->free_shmem_list))) { + if (atomic_read(&(connection->shmem_count)) == + TF_SHMEM_MAX_COUNT) { + printk(KERN_ERR "tf_map_shmem(%p):" + " maximum shared memories already registered\n", + connection); + error = -ENOMEM; + goto error; + } + + /* no descriptor available, allocate a new one */ + + desc = 
(struct tf_shmem_desc *) internal_kmalloc( + sizeof(*desc), GFP_KERNEL); + if (desc == NULL) { + printk(KERN_ERR "tf_map_shmem(%p):" + " failed to allocate descriptor\n", + connection); + error = -ENOMEM; + goto error; + } + + /* Initialize the structure */ + desc->type = TF_SHMEM_TYPE_REGISTERED_SHMEM; + atomic_set(&desc->ref_count, 1); + INIT_LIST_HEAD(&(desc->list)); + + atomic_inc(&(connection->shmem_count)); + } else { + /* take the first free shared memory descriptor */ + desc = list_first_entry(&(connection->free_shmem_list), + struct tf_shmem_desc, list); + list_del(&(desc->list)); + } + + /* Add the descriptor to the used list */ + list_add(&(desc->list), &(connection->used_shmem_list)); + + error = tf_fill_descriptor_table( + &(connection->cpt_alloc_context), + desc, + buffer, + connection->vmas, + descriptors, + buffer_size, + buffer_start_offset, + in_user_space, + flags, + descriptor_count); + + if (error != 0) { + dprintk(KERN_ERR "tf_map_shmem(%p):" + " tf_fill_descriptor_table failed with error " + "code %d!\n", + connection, + error); + goto error; + } + desc->pBuffer = (u8 *) buffer; + + /* + * Successful completion. + */ + *shmem_desc = desc; + mutex_unlock(&(connection->shmem_mutex)); + dprintk(KERN_DEBUG "tf_map_shmem: success\n"); + return 0; + + + /* + * Error handling. 
+ */ +error: + mutex_unlock(&(connection->shmem_mutex)); + dprintk(KERN_ERR "tf_map_shmem: failure with error code %d\n", + error); + + tf_unmap_shmem( + connection, + desc, + 0); + + return error; +} + + + +/* This function is a copy of the find_vma() function +in linux kernel 2.6.15 version with some fixes : + - memory block may end on vm_end + - check the full memory block is in the memory area + - guarantee NULL is returned if no memory area is found */ +struct vm_area_struct *tf_find_vma(struct mm_struct *mm, + unsigned long addr, unsigned long size) +{ + struct vm_area_struct *vma = NULL; + + dprintk(KERN_INFO + "tf_find_vma addr=0x%lX size=0x%lX\n", addr, size); + + if (mm) { + /* Check the cache first. */ + /* (Cache hit rate is typically around 35%.) */ + vma = mm->mmap_cache; + if (!(vma && vma->vm_end >= (addr+size) && + vma->vm_start <= addr)) { + struct rb_node *rb_node; + + rb_node = mm->mm_rb.rb_node; + vma = NULL; + + while (rb_node) { + struct vm_area_struct *vma_tmp; + + vma_tmp = rb_entry(rb_node, + struct vm_area_struct, vm_rb); + + dprintk(KERN_INFO + "vma_tmp->vm_start=0x%lX" + "vma_tmp->vm_end=0x%lX\n", + vma_tmp->vm_start, + vma_tmp->vm_end); + + if (vma_tmp->vm_end >= (addr+size)) { + vma = vma_tmp; + if (vma_tmp->vm_start <= addr) + break; + + rb_node = rb_node->rb_left; + } else { + rb_node = rb_node->rb_right; + } + } + + if (vma) + mm->mmap_cache = vma; + if (rb_node == NULL) + vma = NULL; + } + } + return vma; +} + +static int tf_validate_shmem_and_flags( + u32 shmem, + u32 shmem_size, + u32 flags) +{ + struct vm_area_struct *vma; + u32 chunk; + + if (shmem_size == 0) + /* This is always valid */ + return 0; + + if ((shmem + shmem_size) < shmem) + /* Overflow */ + return -EINVAL; + + down_read(¤t->mm->mmap_sem); + + /* + * When looking for a memory address, split buffer into chunks of + * size=PAGE_SIZE. 
+ */ + chunk = PAGE_SIZE - (shmem & (PAGE_SIZE-1)); + if (chunk > shmem_size) + chunk = shmem_size; + + do { + vma = tf_find_vma(current->mm, shmem, chunk); + + if (vma == NULL) { + dprintk(KERN_ERR "%s: area not found\n", __func__); + goto error; + } + + if (flags & TF_SHMEM_TYPE_READ) + if (!(vma->vm_flags & VM_READ)) { + dprintk(KERN_ERR "%s: no read permission\n", + __func__); + goto error; + } + if (flags & TF_SHMEM_TYPE_WRITE) + if (!(vma->vm_flags & VM_WRITE)) { + dprintk(KERN_ERR "%s: no write permission\n", + __func__); + goto error; + } + + shmem_size -= chunk; + shmem += chunk; + chunk = (shmem_size <= PAGE_SIZE ? + shmem_size : PAGE_SIZE); + } while (shmem_size != 0); + + up_read(¤t->mm->mmap_sem); + return 0; + +error: + up_read(¤t->mm->mmap_sem); + return -EFAULT; +} + + +static int tf_map_temp_shmem(struct tf_connection *connection, + struct tf_command_param_temp_memref *temp_memref, + u32 param_type, + struct tf_shmem_desc **shmem_desc) +{ + u32 flags; + u32 error = S_SUCCESS; + bool in_user_space = connection->owner != TF_CONNECTION_OWNER_KERNEL; + + dprintk(KERN_INFO "tf_map_temp_shmem(%p, " + "0x%08x[size=0x%08x], offset=0x%08x)\n", + connection, + temp_memref->descriptor, + temp_memref->size, + temp_memref->offset); + + switch (param_type) { + case TF_PARAM_TYPE_MEMREF_TEMP_INPUT: + flags = TF_SHMEM_TYPE_READ; + break; + case TF_PARAM_TYPE_MEMREF_TEMP_OUTPUT: + flags = TF_SHMEM_TYPE_WRITE; + break; + case TF_PARAM_TYPE_MEMREF_TEMP_INOUT: + flags = TF_SHMEM_TYPE_WRITE | TF_SHMEM_TYPE_READ; + break; + default: + error = -EINVAL; + goto error; + } + + if (temp_memref->descriptor == 0) { + /* NULL tmpref */ + temp_memref->offset = 0; + *shmem_desc = NULL; + } else if ((temp_memref->descriptor != 0) && + (temp_memref->size == 0)) { + /* Empty tmpref */ + temp_memref->offset = temp_memref->descriptor; + temp_memref->descriptor = 0; + temp_memref->size = 0; + *shmem_desc = NULL; + } else { + /* Map the temp shmem block */ + + u32 
shared_mem_descriptors[TF_MAX_COARSE_PAGES]; + u32 descriptorCount; + + if (in_user_space) { + error = tf_validate_shmem_and_flags( + temp_memref->descriptor, + temp_memref->size, + flags); + if (error != 0) + goto error; + } + + error = tf_map_shmem( + connection, + temp_memref->descriptor, + flags, + in_user_space, + shared_mem_descriptors, + &(temp_memref->offset), + temp_memref->size, + shmem_desc, + &descriptorCount); + temp_memref->descriptor = shared_mem_descriptors[0]; + } + +error: + return error; +} + +/* + * Clean up a list of shared memory descriptors. + */ +static void tf_shared_memory_cleanup_list( + struct tf_connection *connection, + struct list_head *shmem_desc_list) +{ + while (!list_empty(shmem_desc_list)) { + struct tf_shmem_desc *shmem_desc; + + shmem_desc = list_first_entry(shmem_desc_list, + struct tf_shmem_desc, list); + + tf_unmap_shmem(connection, shmem_desc, 1); + } +} + + +/* + * Clean up the shared memory information in the connection. + * Releases all allocated pages. + */ +static void tf_cleanup_shared_memories(struct tf_connection *connection) +{ + /* clean up the list of used and free descriptors. + * done outside the mutex, because tf_unmap_shmem already + * mutex()ed + */ + tf_shared_memory_cleanup_list(connection, + &connection->used_shmem_list); + tf_shared_memory_cleanup_list(connection, + &connection->free_shmem_list); + + mutex_lock(&(connection->shmem_mutex)); + + /* Free the Vmas page */ + if (connection->vmas) { + internal_free_page((unsigned long) connection->vmas); + connection->vmas = NULL; + } + + tf_release_coarse_page_table_allocator( + &(connection->cpt_alloc_context)); + + mutex_unlock(&(connection->shmem_mutex)); +} + + +/* + * Initialize the shared memory in a connection. 
+ * Allocates the minimum memory to be provided + * for shared memory management + */ +int tf_init_shared_memory(struct tf_connection *connection) +{ + int error; + int i; + int coarse_page_index; + + /* + * We only need to initialize special elements and attempt to allocate + * the minimum shared memory descriptors we want to support + */ + + mutex_init(&(connection->shmem_mutex)); + INIT_LIST_HEAD(&(connection->free_shmem_list)); + INIT_LIST_HEAD(&(connection->used_shmem_list)); + atomic_set(&(connection->shmem_count), 0); + + tf_init_coarse_page_table_allocator( + &(connection->cpt_alloc_context)); + + + /* + * Preallocate 3 pages to increase the chances that a connection + * succeeds in allocating shared mem + */ + for (i = 0; + i < 3; + i++) { + struct tf_shmem_desc *shmem_desc = + (struct tf_shmem_desc *) internal_kmalloc( + sizeof(*shmem_desc), GFP_KERNEL); + + if (shmem_desc == NULL) { + printk(KERN_ERR "tf_init_shared_memory(%p):" + " failed to pre allocate descriptor %d\n", + connection, + i); + error = -ENOMEM; + goto error; + } + + for (coarse_page_index = 0; + coarse_page_index < TF_MAX_COARSE_PAGES; + coarse_page_index++) { + struct tf_coarse_page_table *coarse_pg_table; + + coarse_pg_table = tf_alloc_coarse_page_table( + &(connection->cpt_alloc_context), + TF_PAGE_DESCRIPTOR_TYPE_PREALLOCATED); + + if (coarse_pg_table == NULL) { + printk(KERN_ERR "tf_init_shared_memory(%p)" + ": descriptor %d coarse page %d - " + "tf_alloc_coarse_page_table() " + "failed\n", + connection, + i, + coarse_page_index); + error = -ENOMEM; + goto error; + } + + shmem_desc->coarse_pg_table[coarse_page_index] = + coarse_pg_table; + } + shmem_desc->coarse_pg_table_count = 0; + + shmem_desc->type = TF_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM; + atomic_set(&shmem_desc->ref_count, 1); + + /* + * add this preallocated descriptor to the list of free + * descriptors Keep the device context specific one at the + * beginning of the list + */ + INIT_LIST_HEAD(&(shmem_desc->list)); + 
list_add_tail(&(shmem_desc->list), + &(connection->free_shmem_list)); + } + + /* allocate memory for the vmas structure */ + connection->vmas = + (struct vm_area_struct **) internal_get_zeroed_page(GFP_KERNEL); + if (connection->vmas == NULL) { + printk(KERN_ERR "tf_init_shared_memory(%p):" + " vmas - failed to get_zeroed_page\n", + connection); + error = -ENOMEM; + goto error; + } + + return 0; + +error: + tf_cleanup_shared_memories(connection); + return error; +} + +/*---------------------------------------------------------------------------- + * Connection operations to the Secure World + *----------------------------------------------------------------------------*/ + +int tf_create_device_context( + struct tf_connection *connection) +{ + union tf_command command; + union tf_answer answer; + int error = 0; + + dprintk(KERN_INFO "tf_create_device_context(%p)\n", + connection); + + command.create_device_context.message_type = + TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT; + command.create_device_context.message_size = + (sizeof(struct tf_command_create_device_context) + - sizeof(struct tf_command_header))/sizeof(u32); + command.create_device_context.operation_id = (u32) &answer; + command.create_device_context.device_context_id = (u32) connection; + + error = tf_send_receive( + &connection->dev->sm, + &command, + &answer, + connection, + true); + + if ((error != 0) || + (answer.create_device_context.error_code != S_SUCCESS)) + goto error; + + /* + * CREATE_DEVICE_CONTEXT succeeded, + * store device context handler and update connection status + */ + connection->device_context = + answer.create_device_context.device_context; + spin_lock(&(connection->state_lock)); + connection->state = TF_CONN_STATE_VALID_DEVICE_CONTEXT; + spin_unlock(&(connection->state_lock)); + + /* successful completion */ + dprintk(KERN_INFO "tf_create_device_context(%p):" + " device_context=0x%08x\n", + connection, + answer.create_device_context.device_context); + return 0; + +error: + if (error 
!= 0) { + dprintk(KERN_ERR "tf_create_device_context failed with " + "error %d\n", error); + } else { + /* + * We sent a DeviceCreateContext. The state is now + * TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT It has to be + * reset if we ever want to send a DeviceCreateContext again + */ + spin_lock(&(connection->state_lock)); + connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT; + spin_unlock(&(connection->state_lock)); + dprintk(KERN_ERR "tf_create_device_context failed with " + "error_code 0x%08X\n", + answer.create_device_context.error_code); + if (answer.create_device_context.error_code == + S_ERROR_OUT_OF_MEMORY) + error = -ENOMEM; + else + error = -EFAULT; + } + + return error; +} + +/* Check that the current application belongs to the + * requested GID */ +static bool tf_check_gid(gid_t requested_gid) +{ + if (requested_gid == current_egid()) { + return true; + } else { + u32 size; + u32 i; + /* Look in the supplementary GIDs */ + get_group_info(GROUP_INFO); + size = GROUP_INFO->ngroups; + for (i = 0; i < size; i++) + if (requested_gid == GROUP_AT(GROUP_INFO , i)) + return true; + } + return false; +} + +/* + * Opens a client session to the Secure World + */ +int tf_open_client_session( + struct tf_connection *connection, + union tf_command *command, + union tf_answer *answer) +{ + int error = 0; + struct tf_shmem_desc *shmem_desc[4] = {NULL}; + u32 i; + + dprintk(KERN_INFO "tf_open_client_session(%p)\n", connection); + + /* + * Initialize the message size with no login data. This will be later + * adjusted the the cases below + */ + command->open_client_session.message_size = + (sizeof(struct tf_command_open_client_session) - 20 + - sizeof(struct tf_command_header))/4; + + switch (command->open_client_session.login_type) { + case TF_LOGIN_PUBLIC: + /* Nothing to do */ + break; + + case TF_LOGIN_USER: + /* + * Send the EUID of the calling application in the login data. + * Update message size. 
+ */ + *(u32 *) &command->open_client_session.login_data = + current_euid(); +#ifndef CONFIG_ANDROID + command->open_client_session.login_type = + (u32) TF_LOGIN_USER_LINUX_EUID; +#else + command->open_client_session.login_type = + (u32) TF_LOGIN_USER_ANDROID_EUID; +#endif + + /* Added one word */ + command->open_client_session.message_size += 1; + break; + + case TF_LOGIN_GROUP: { + /* Check requested GID */ + gid_t requested_gid = + *(u32 *) command->open_client_session.login_data; + + if (!tf_check_gid(requested_gid)) { + dprintk(KERN_ERR "tf_open_client_session(%p) " + "TF_LOGIN_GROUP: requested GID (0x%x) does " + "not match real eGID (0x%x)" + "or any of the supplementary GIDs\n", + connection, requested_gid, current_egid()); + error = -EACCES; + goto error; + } +#ifndef CONFIG_ANDROID + command->open_client_session.login_type = + TF_LOGIN_GROUP_LINUX_GID; +#else + command->open_client_session.login_type = + TF_LOGIN_GROUP_ANDROID_GID; +#endif + + command->open_client_session.message_size += 1; /* GID */ + break; + } + +#ifndef CONFIG_ANDROID + case TF_LOGIN_APPLICATION: { + /* + * Compute SHA-1 hash of the application fully-qualified path + * name. Truncate the hash to 16 bytes and send it as login + * data. Update message size. + */ + u8 pSHA1Hash[SHA1_DIGEST_SIZE]; + + error = tf_hash_application_path_and_data(pSHA1Hash, + NULL, 0); + if (error != 0) { + dprintk(KERN_ERR "tf_open_client_session: " + "error in tf_hash_application_path_and_data\n"); + goto error; + } + memcpy(&command->open_client_session.login_data, + pSHA1Hash, 16); + command->open_client_session.login_type = + TF_LOGIN_APPLICATION_LINUX_PATH_SHA1_HASH; + /* 16 bytes */ + command->open_client_session.message_size += 4; + break; + } +#else + case TF_LOGIN_APPLICATION: + /* + * Send the real UID of the calling application in the login + * data. Update message size. 
+ */ + *(u32 *) &command->open_client_session.login_data = + current_uid(); + + command->open_client_session.login_type = + (u32) TF_LOGIN_APPLICATION_ANDROID_UID; + + /* Added one word */ + command->open_client_session.message_size += 1; + break; +#endif + +#ifndef CONFIG_ANDROID + case TF_LOGIN_APPLICATION_USER: { + /* + * Compute SHA-1 hash of the concatenation of the application + * fully-qualified path name and the EUID of the calling + * application. Truncate the hash to 16 bytes and send it as + * login data. Update message size. + */ + u8 pSHA1Hash[SHA1_DIGEST_SIZE]; + + error = tf_hash_application_path_and_data(pSHA1Hash, + (u8 *) &(current_euid()), sizeof(current_euid())); + if (error != 0) { + dprintk(KERN_ERR "tf_open_client_session: " + "error in tf_hash_application_path_and_data\n"); + goto error; + } + memcpy(&command->open_client_session.login_data, + pSHA1Hash, 16); + command->open_client_session.login_type = + TF_LOGIN_APPLICATION_USER_LINUX_PATH_EUID_SHA1_HASH; + + /* 16 bytes */ + command->open_client_session.message_size += 4; + + break; + } +#else + case TF_LOGIN_APPLICATION_USER: + /* + * Send the real UID and the EUID of the calling application in + * the login data. Update message size. + */ + *(u32 *) &command->open_client_session.login_data = + current_uid(); + *(u32 *) &command->open_client_session.login_data[4] = + current_euid(); + + command->open_client_session.login_type = + TF_LOGIN_APPLICATION_USER_ANDROID_UID_EUID; + + /* Added two words */ + command->open_client_session.message_size += 2; + break; +#endif + +#ifndef CONFIG_ANDROID + case TF_LOGIN_APPLICATION_GROUP: { + /* + * Check requested GID. Compute SHA-1 hash of the concatenation + * of the application fully-qualified path name and the + * requested GID. Update message size + */ + gid_t requested_gid; + u8 pSHA1Hash[SHA1_DIGEST_SIZE]; + + requested_gid = *(u32 *) &command->open_client_session. 
+ login_data; + + if (!tf_check_gid(requested_gid)) { + dprintk(KERN_ERR "tf_open_client_session(%p) " + "TF_LOGIN_APPLICATION_GROUP: requested GID (0x%x) " + "does not match real eGID (0x%x)" + "or any of the supplementary GIDs\n", + connection, requested_gid, current_egid()); + error = -EACCES; + goto error; + } + + error = tf_hash_application_path_and_data(pSHA1Hash, + &requested_gid, sizeof(u32)); + if (error != 0) { + dprintk(KERN_ERR "tf_open_client_session: " + "error in tf_hash_application_path_and_data\n"); + goto error; + } + + memcpy(&command->open_client_session.login_data, + pSHA1Hash, 16); + command->open_client_session.login_type = + TF_LOGIN_APPLICATION_GROUP_LINUX_PATH_GID_SHA1_HASH; + + /* 16 bytes */ + command->open_client_session.message_size += 4; + break; + } +#else + case TF_LOGIN_APPLICATION_GROUP: { + /* + * Check requested GID. Send the real UID and the requested GID + * in the login data. Update message size. + */ + gid_t requested_gid; + + requested_gid = *(u32 *) &command->open_client_session. + login_data; + + if (!tf_check_gid(requested_gid)) { + dprintk(KERN_ERR "tf_open_client_session(%p) " + "TF_LOGIN_APPLICATION_GROUP: requested GID (0x%x) " + "does not match real eGID (0x%x)" + "or any of the supplementary GIDs\n", + connection, requested_gid, current_egid()); + error = -EACCES; + goto error; + } + + *(u32 *) &command->open_client_session.login_data = + current_uid(); + *(u32 *) &command->open_client_session.login_data[4] = + requested_gid; + + command->open_client_session.login_type = + TF_LOGIN_APPLICATION_GROUP_ANDROID_UID_GID; + + /* Added two words */ + command->open_client_session.message_size += 2; + + break; + } +#endif + + case TF_LOGIN_PRIVILEGED: + /* A privileged login may be performed only on behalf of the + kernel itself or on behalf of a process with euid=0 or + egid=0. 
*/ + if (connection->owner == TF_CONNECTION_OWNER_KERNEL) { + dprintk(KERN_DEBUG "tf_open_client_session: " + "TF_LOGIN_PRIVILEGED for kernel API\n"); + command->open_client_session.login_type = + TF_LOGIN_PRIVILEGED_KERNEL; + } else { + dprintk(KERN_DEBUG "tf_open_client_session: " + "TF_LOGIN_PRIVILEGED for %u:%u\n", + current_euid(), current_egid()); + command->open_client_session.login_type = + TF_LOGIN_PRIVILEGED; + } + break; + + case TF_LOGIN_AUTHENTICATION: { + /* + * Compute SHA-1 hash of the application binary + * Send this hash as the login data (20 bytes) + */ + + u8 *hash; + hash = &(command->open_client_session.login_data[0]); + + error = tf_get_current_process_hash(hash); + if (error != 0) { + dprintk(KERN_ERR "tf_open_client_session: " + "error in tf_get_current_process_hash\n"); + goto error; + } + command->open_client_session.login_type = + TF_LOGIN_AUTHENTICATION_BINARY_SHA1_HASH; + + /* 20 bytes */ + command->open_client_session.message_size += 5; + break; + } + + case TF_LOGIN_PRIVILEGED_KERNEL: + /* A kernel login may be performed only on behalf of the + kernel itself. 
*/ + if (connection->owner == TF_CONNECTION_OWNER_KERNEL) { + dprintk(KERN_DEBUG "tf_open_client_session: " + "TF_LOGIN_PRIVILEGED_KERNEL for kernel API\n"); + command->open_client_session.login_type = + TF_LOGIN_PRIVILEGED_KERNEL; + } else { + dprintk(KERN_ERR "tf_open_client_session: " + " user %d, group %d not allowed to open " + "session with TF_LOGIN_PRIVILEGED_KERNEL\n", + current_euid(), current_egid()); + error = -EACCES; + goto error; + } + command->open_client_session.login_type = + TF_LOGIN_PRIVILEGED_KERNEL; + break; + + default: + dprintk(KERN_ERR "tf_open_client_session: " + "unknown login_type(%08X)\n", + command->open_client_session.login_type); + error = -EOPNOTSUPP; + goto error; + } + + /* Map the temporary memory references */ + for (i = 0; i < 4; i++) { + int param_type; + param_type = TF_GET_PARAM_TYPE( + command->open_client_session.param_types, i); + if ((param_type & (TF_PARAM_TYPE_MEMREF_FLAG | + TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG)) + == TF_PARAM_TYPE_MEMREF_FLAG) { + /* Map temp mem ref */ + error = tf_map_temp_shmem(connection, + &command->open_client_session. 
+ params[i].temp_memref, + param_type, + &shmem_desc[i]); + if (error != 0) { + dprintk(KERN_ERR "tf_open_client_session: " + "unable to map temporary memory block " + "(%08X)\n", error); + goto error; + } + } + } + + /* Fill the handle of the Device Context */ + command->open_client_session.device_context = + connection->device_context; + + error = tf_send_receive( + &connection->dev->sm, + command, + answer, + connection, + true); + +error: + /* Unmap the temporary memory references */ + for (i = 0; i < 4; i++) + if (shmem_desc[i] != NULL) + tf_unmap_shmem(connection, shmem_desc[i], 0); + + if (error != 0) + dprintk(KERN_ERR "tf_open_client_session returns %d\n", + error); + else + dprintk(KERN_ERR "tf_open_client_session returns " + "error_code 0x%08X\n", + answer->open_client_session.error_code); + + return error; +} + + +/* + * Closes a client session from the Secure World + */ +int tf_close_client_session( + struct tf_connection *connection, + union tf_command *command, + union tf_answer *answer) +{ + int error = 0; + + dprintk(KERN_DEBUG "tf_close_client_session(%p)\n", connection); + + command->close_client_session.message_size = + (sizeof(struct tf_command_close_client_session) - + sizeof(struct tf_command_header)) / 4; + command->close_client_session.device_context = + connection->device_context; + + error = tf_send_receive( + &connection->dev->sm, + command, + answer, + connection, + true); + + if (error != 0) + dprintk(KERN_ERR "tf_close_client_session returns %d\n", + error); + else + dprintk(KERN_ERR "tf_close_client_session returns " + "error 0x%08X\n", + answer->close_client_session.error_code); + + return error; +} + + +/* + * Registers a shared memory to the Secure World + */ +int tf_register_shared_memory( + struct tf_connection *connection, + union tf_command *command, + union tf_answer *answer) +{ + int error = 0; + struct tf_shmem_desc *shmem_desc = NULL; + bool in_user_space = connection->owner != TF_CONNECTION_OWNER_KERNEL; + struct 
tf_command_register_shared_memory *msg = + &command->register_shared_memory; + + dprintk(KERN_INFO "tf_register_shared_memory(%p) " + "%p[0x%08X][0x%08x]\n", + connection, + (void *)msg->shared_mem_descriptors[0], + msg->shared_mem_size, + (u32)msg->memory_flags); + + if (in_user_space) { + error = tf_validate_shmem_and_flags( + msg->shared_mem_descriptors[0], + msg->shared_mem_size, + (u32)msg->memory_flags); + if (error != 0) + goto error; + } + + /* Initialize message_size with no descriptors */ + msg->message_size + = (sizeof(struct tf_command_register_shared_memory) - + sizeof(struct tf_command_header)) / 4; + + /* Map the shmem block and update the message */ + if (msg->shared_mem_size == 0) { + /* Empty shared mem */ + msg->shared_mem_start_offset = msg->shared_mem_descriptors[0]; + } else { + u32 descriptorCount; + error = tf_map_shmem( + connection, + msg->shared_mem_descriptors[0], + msg->memory_flags, + in_user_space, + msg->shared_mem_descriptors, + &(msg->shared_mem_start_offset), + msg->shared_mem_size, + &shmem_desc, + &descriptorCount); + if (error != 0) { + dprintk(KERN_ERR "tf_register_shared_memory: " + "unable to map shared memory block\n"); + goto error; + } + msg->message_size += descriptorCount; + } + + /* + * write the correct device context handle and the address of the shared + * memory descriptor in the message + */ + msg->device_context = connection->device_context; + msg->block_id = (u32)shmem_desc; + + /* Send the updated message */ + error = tf_send_receive( + &connection->dev->sm, + command, + answer, + connection, + true); + + if ((error != 0) || + (answer->register_shared_memory.error_code + != S_SUCCESS)) { + dprintk(KERN_ERR "tf_register_shared_memory: " + "operation failed. 
Unmap block\n"); + goto error; + } + + /* Saves the block handle returned by the secure world */ + if (shmem_desc != NULL) + shmem_desc->block_identifier = + answer->register_shared_memory.block; + + /* successful completion */ + dprintk(KERN_INFO "tf_register_shared_memory(%p):" + " block_id=0x%08x block=0x%08x\n", + connection, msg->block_id, + answer->register_shared_memory.block); + return 0; + + /* error completion */ +error: + tf_unmap_shmem( + connection, + shmem_desc, + 0); + + if (error != 0) + dprintk(KERN_ERR "tf_register_shared_memory returns %d\n", + error); + else + dprintk(KERN_ERR "tf_register_shared_memory returns " + "error_code 0x%08X\n", + answer->register_shared_memory.error_code); + + return error; +} + + +/* + * Releases a shared memory from the Secure World + */ +int tf_release_shared_memory( + struct tf_connection *connection, + union tf_command *command, + union tf_answer *answer) +{ + int error = 0; + + dprintk(KERN_DEBUG "tf_release_shared_memory(%p)\n", connection); + + command->release_shared_memory.message_size = + (sizeof(struct tf_command_release_shared_memory) - + sizeof(struct tf_command_header)) / 4; + command->release_shared_memory.device_context = + connection->device_context; + + error = tf_send_receive( + &connection->dev->sm, + command, + answer, + connection, + true); + + if ((error != 0) || + (answer->release_shared_memory.error_code != S_SUCCESS)) + goto error; + + /* Use block_id to get back the pointer to shmem_desc */ + tf_unmap_shmem( + connection, + (struct tf_shmem_desc *) + answer->release_shared_memory.block_id, + 0); + + /* successful completion */ + dprintk(KERN_INFO "tf_release_shared_memory(%p):" + " block_id=0x%08x block=0x%08x\n", + connection, answer->release_shared_memory.block_id, + command->release_shared_memory.block); + return 0; + + +error: + if (error != 0) + dprintk(KERN_ERR "tf_release_shared_memory returns %d\n", + error); + else + dprintk(KERN_ERR "tf_release_shared_memory returns " + 
"nChannelStatus 0x%08X\n", + answer->release_shared_memory.error_code); + + return error; + +} + + +#ifdef CONFIG_TF_ION +extern struct ion_device *omap_ion_device; +#endif /* CONFIG_TF_ION */ +/* + * Invokes a client command to the Secure World + */ +int tf_invoke_client_command( + struct tf_connection *connection, + union tf_command *command, + union tf_answer *answer) +{ + int error = 0; + struct tf_shmem_desc *shmem_desc[4] = {NULL}; + int i; +#ifdef CONFIG_TF_ION + struct ion_handle *new_handle = NULL; +#endif /* CONFIG_TF_ION */ + + dprintk(KERN_INFO "tf_invoke_client_command(%p)\n", connection); + + command->release_shared_memory.message_size = + (sizeof(struct tf_command_invoke_client_command) - + sizeof(struct tf_command_header)) / 4; + +#ifdef CONFIG_TF_ZEBRA + error = tf_crypto_try_shortcuted_update(connection, + (struct tf_command_invoke_client_command *) command, + (struct tf_answer_invoke_client_command *) answer); + if (error == 0) + return error; +#endif + + /* Map the tmprefs */ + for (i = 0; i < 4; i++) { + int param_type = TF_GET_PARAM_TYPE( + command->invoke_client_command.param_types, i); + + if ((param_type & (TF_PARAM_TYPE_MEMREF_FLAG | + TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG)) + == TF_PARAM_TYPE_MEMREF_FLAG) { + /* A temporary memref: map it */ + error = tf_map_temp_shmem(connection, + &command->invoke_client_command. 
+ params[i].temp_memref, + param_type, &shmem_desc[i]); + if (error != 0) { + dprintk(KERN_ERR + "tf_invoke_client_command: " + "unable to map temporary memory " + "block\n (%08X)", error); + goto error; + } + } +#ifdef CONFIG_TF_ION + else if (param_type == TF_PARAM_TYPE_MEMREF_ION_HANDLE) { + struct tf_command_invoke_client_command *invoke; + ion_phys_addr_t ion_addr; + size_t ion_len; + struct ion_buffer *buffer; + + if (connection->ion_client == NULL) { + connection->ion_client = ion_client_create( + omap_ion_device, + (1 << ION_HEAP_TYPE_CARVEOUT), + "smc"); + } + if (connection->ion_client == NULL) { + dprintk(KERN_ERR "%s(%p): " + "unable to create ion client\n", + __func__, connection); + error = -EFAULT; + goto error; + } + + invoke = &command->invoke_client_command; + + dprintk(KERN_INFO "ion_handle %x", + invoke->params[i].value.a); + buffer = ion_share(connection->ion_client, + (struct ion_handle *)invoke->params[i].value.a); + if (buffer == NULL) { + dprintk(KERN_ERR "%s(%p): " + "unable to share ion handle\n", + __func__, connection); + error = -EFAULT; + goto error; + } + + dprintk(KERN_INFO "ion_buffer %p", buffer); + new_handle = ion_import(connection->ion_client, buffer); + if (new_handle == NULL) { + dprintk(KERN_ERR "%s(%p): " + "unable to import ion buffer\n", + __func__, connection); + error = -EFAULT; + goto error; + } + + dprintk(KERN_INFO "new_handle %x", new_handle); + error = ion_phys(connection->ion_client, + new_handle, + &ion_addr, + &ion_len); + if (error) { + dprintk(KERN_ERR + "%s: unable to convert ion handle " + "0x%08X (error code 0x%08X)\n", + __func__, + new_handle, + error); + error = -EINVAL; + goto error; + } + dprintk(KERN_INFO + "%s: handle=0x%08x phys_add=0x%08x length=0x%08x\n", + __func__, invoke->params[i].value.a, ion_addr, ion_len); + + invoke->params[i].value.a = (u32) ion_addr; + invoke->params[i].value.b = (u32) ion_len; + + invoke->param_types &= ~((0xF) << (4*i)); + invoke->param_types |= + 
TF_PARAM_TYPE_VALUE_INPUT << (4*i); + } +#endif /* CONFIG_TF_ION */ + } + + command->invoke_client_command.device_context = + connection->device_context; + + error = tf_send_receive(&connection->dev->sm, command, + answer, connection, true); + +error: +#ifdef CONFIG_TF_ION + if (new_handle != NULL) + ion_free(connection->ion_client, new_handle); +#endif /* CONFIG_TF_ION */ + /* Unmap de temp mem refs */ + for (i = 0; i < 4; i++) { + if (shmem_desc[i] != NULL) { + dprintk(KERN_INFO "tf_invoke_client_command: " + "UnMatemp_memref %d\n ", i); + + tf_unmap_shmem(connection, shmem_desc[i], 0); + } + } + + if (error != 0) + dprintk(KERN_ERR "tf_invoke_client_command returns %d\n", + error); + else + dprintk(KERN_ERR "tf_invoke_client_command returns " + "error_code 0x%08X\n", + answer->invoke_client_command.error_code); + + return error; +} + + +/* + * Cancels a client command from the Secure World + */ +int tf_cancel_client_command( + struct tf_connection *connection, + union tf_command *command, + union tf_answer *answer) +{ + int error = 0; + + dprintk(KERN_DEBUG "tf_cancel_client_command(%p)\n", connection); + + command->cancel_client_operation.device_context = + connection->device_context; + command->cancel_client_operation.message_size = + (sizeof(struct tf_command_cancel_client_operation) - + sizeof(struct tf_command_header)) / 4; + + error = tf_send_receive( + &connection->dev->sm, + command, + answer, + connection, + true); + + if ((error != 0) || + (answer->cancel_client_operation.error_code != S_SUCCESS)) + goto error; + + + /* successful completion */ + return 0; + +error: + if (error != 0) + dprintk(KERN_ERR "tf_cancel_client_command returns %d\n", + error); + else + dprintk(KERN_ERR "tf_cancel_client_command returns " + "nChannelStatus 0x%08X\n", + answer->cancel_client_operation.error_code); + + return error; +} + + + +/* + * Destroys a device context from the Secure World + */ +int tf_destroy_device_context( + struct tf_connection *connection) +{ + int 
error; + /* + * AFY: better use the specialized tf_command_destroy_device_context + * structure: this will save stack + */ + union tf_command command; + union tf_answer answer; + + dprintk(KERN_INFO "tf_destroy_device_context(%p)\n", connection); + + BUG_ON(connection == NULL); + + command.header.message_type = TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT; + command.header.message_size = + (sizeof(struct tf_command_destroy_device_context) - + sizeof(struct tf_command_header))/sizeof(u32); + + /* + * fill in the device context handler + * it is guarantied that the first shared memory descriptor describes + * the device context + */ + command.destroy_device_context.device_context = + connection->device_context; + + error = tf_send_receive( + &connection->dev->sm, + &command, + &answer, + connection, + false); + + if ((error != 0) || + (answer.destroy_device_context.error_code != S_SUCCESS)) + goto error; + + spin_lock(&(connection->state_lock)); + connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT; + spin_unlock(&(connection->state_lock)); + + /* successful completion */ + dprintk(KERN_INFO "tf_destroy_device_context(%p)\n", + connection); + return 0; + +error: + if (error != 0) { + dprintk(KERN_ERR "tf_destroy_device_context failed with " + "error %d\n", error); + } else { + dprintk(KERN_ERR "tf_destroy_device_context failed with " + "error_code 0x%08X\n", + answer.destroy_device_context.error_code); + if (answer.destroy_device_context.error_code == + S_ERROR_OUT_OF_MEMORY) + error = -ENOMEM; + else + error = -EFAULT; + } + + return error; +} + + +/*---------------------------------------------------------------------------- + * Connection initialization and cleanup operations + *----------------------------------------------------------------------------*/ + +/* + * Opens a connection to the specified device. + * + * The placeholder referenced by connection is set to the address of the + * new connection; it is set to NULL upon failure. 
+ * + * Returns zero upon successful completion, or an appropriate error code upon + * failure. + */ +int tf_open(struct tf_device *dev, + struct file *file, + struct tf_connection **connection) +{ + int error; + struct tf_connection *conn = NULL; + + dprintk(KERN_INFO "tf_open(%p, %p)\n", file, connection); + + /* + * Allocate and initialize the conn. + * kmalloc only allocates sizeof(*conn) virtual memory + */ + conn = (struct tf_connection *) internal_kmalloc(sizeof(*conn), + GFP_KERNEL); + if (conn == NULL) { + printk(KERN_ERR "tf_open(): " + "Out of memory for conn!\n"); + error = -ENOMEM; + goto error; + } + + memset(conn, 0, sizeof(*conn)); + + conn->state = TF_CONN_STATE_NO_DEVICE_CONTEXT; + conn->dev = dev; + spin_lock_init(&(conn->state_lock)); + atomic_set(&(conn->pending_op_count), 0); + INIT_LIST_HEAD(&(conn->list)); + + /* + * Initialize the shared memory + */ + error = tf_init_shared_memory(conn); + if (error != 0) + goto error; + +#ifdef CONFIG_TF_ZEBRA + /* + * Initialize CUS specifics + */ + tf_crypto_init_cus(conn); +#endif + + /* + * Attach the conn to the device. + */ + spin_lock(&(dev->connection_list_lock)); + list_add(&(conn->list), &(dev->connection_list)); + spin_unlock(&(dev->connection_list_lock)); + + /* + * Successful completion. + */ + + *connection = conn; + + dprintk(KERN_INFO "tf_open(): Success (conn=%p)\n", conn); + return 0; + + /* + * Error handling. + */ + +error: + dprintk(KERN_ERR "tf_open(): Failure (error %d)\n", error); + /* Deallocate the descriptor pages if necessary */ + internal_kfree(conn); + *connection = NULL; + return error; +} + + +/* + * Closes the specified connection. + * + * Upon return, the connection has been destroyed and cannot be used anymore. + * + * This function does nothing if connection is set to NULL. 
+ */ +void tf_close(struct tf_connection *connection) +{ + int error; + enum TF_CONN_STATE state; + + dprintk(KERN_DEBUG "tf_close(%p)\n", connection); + + if (connection == NULL) + return; + + /* + * Assumption: Linux guarantees that no other operation is in progress + * and that no other operation will be started when close is called + */ + BUG_ON(atomic_read(&(connection->pending_op_count)) != 0); + + /* + * Exchange a Destroy Device Context message if needed. + */ + spin_lock(&(connection->state_lock)); + state = connection->state; + spin_unlock(&(connection->state_lock)); + if (state == TF_CONN_STATE_VALID_DEVICE_CONTEXT) { + /* + * A DestroyDeviceContext operation was not performed. Do it + * now. + */ + error = tf_destroy_device_context(connection); + if (error != 0) + /* avoid cleanup if destroy device context fails */ + goto error; + } + + /* + * Clean up the shared memory + */ + tf_cleanup_shared_memories(connection); + +#ifdef CONFIG_TF_ION + if (connection->ion_client != NULL) + ion_client_destroy(connection->ion_client); +#endif + + spin_lock(&(connection->dev->connection_list_lock)); + list_del(&(connection->list)); + spin_unlock(&(connection->dev->connection_list_lock)); + + internal_kfree(connection); + + return; + +error: + dprintk(KERN_DEBUG "tf_close(%p) failed with error code %d\n", + connection, error); +} diff --git a/security/smc/tf_conn.h b/security/smc/tf_conn.h new file mode 100644 index 0000000..d2c8261 --- /dev/null +++ b/security/smc/tf_conn.h @@ -0,0 +1,87 @@ +/** + * Copyright (c) 2011 Trusted Logic S.A. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#ifndef __TF_CONN_H__ +#define __TF_CONN_H__ + +#include "tf_defs.h" + +/* + * Returns a pointer to the connection referenced by the + * specified file. + */ +static inline struct tf_connection *tf_conn_from_file( + struct file *file) +{ + return file->private_data; +} + +/*---------------------------------------------------------------------------- + * Connection operations to the Secure World + *----------------------------------------------------------------------------*/ + +int tf_create_device_context( + struct tf_connection *connection); + +int tf_destroy_device_context( + struct tf_connection *connection); + +int tf_open_client_session( + struct tf_connection *connection, + union tf_command *command, + union tf_answer *answer); + +int tf_close_client_session( + struct tf_connection *connection, + union tf_command *command, + union tf_answer *answer); + +int tf_register_shared_memory( + struct tf_connection *connection, + union tf_command *command, + union tf_answer *answer); + +int tf_release_shared_memory( + struct tf_connection *connection, + union tf_command *command, + union tf_answer *answer); + +int tf_invoke_client_command( + struct tf_connection *connection, + union tf_command *command, + union tf_answer *answer); + +int tf_cancel_client_command( + struct tf_connection *connection, + union tf_command *command, + union tf_answer *answer); + +/*---------------------------------------------------------------------------- + * Connection initialization and cleanup operations + *----------------------------------------------------------------------------*/ + +int tf_open(struct tf_device *dev, + struct file *file, + struct tf_connection **connection); + +void tf_close( + struct 
tf_connection *connection); + + +#endif /* !defined(__TF_CONN_H__) */ diff --git a/security/smc/tf_crypto.c b/security/smc/tf_crypto.c new file mode 100644 index 0000000..7edca0f --- /dev/null +++ b/security/smc/tf_crypto.c @@ -0,0 +1,1278 @@ +/** + * Copyright (c) 2011 Trusted Logic S.A. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#include "tf_defs.h" +#include "tf_util.h" +#include "tf_zebra.h" +#include "tf_crypto.h" +#include "tf_dma.h" + +#define IO_ADDRESS OMAP2_L4_IO_ADDRESS + +#define S_SUCCESS 0x00000000 +#define S_ERROR_GENERIC 0xFFFF0000 +#define S_ERROR_ACCESS_DENIED 0xFFFF0001 +#define S_ERROR_BAD_FORMAT 0xFFFF0005 +#define S_ERROR_BAD_PARAMETERS 0xFFFF0006 +#define S_ERROR_OUT_OF_MEMORY 0xFFFF000C +#define S_ERROR_SHORT_BUFFER 0xFFFF0010 +#define S_ERROR_UNREACHABLE 0xFFFF3013 +#define S_ERROR_SERVICE 0xFFFF1000 + +#define CKR_OK 0x00000000 + +#define PUBLIC_CRYPTO_TIMEOUT_CONST 0x000FFFFF + +#define RPC_AES1_CODE PUBLIC_CRYPTO_HWA_AES1 +#define RPC_DES_CODE PUBLIC_CRYPTO_HWA_DES +#define RPC_SHA_CODE PUBLIC_CRYPTO_HWA_SHA + +#define RPC_CRYPTO_COMMAND_MASK 0x000003c0 + +#define RPC_INSTALL_SHORTCUT_LOCK_ACCELERATOR 0x200 +#define RPC_INSTALL_SHORTCUT_LOCK_ACCELERATOR_UNLOCK 0x000 +#define RPC_INSTALL_SHORTCUT_LOCK_ACCELERATOR_LOCK 0x001 + +#define RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT 0x240 +#define 
RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_LOCK_AES1 RPC_AES1_CODE +#define RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_LOCK_DES RPC_DES_CODE +#define RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_LOCK_SHA RPC_SHA_CODE +#define RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_SUSPEND 0x010 +#define RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_UNINSTALL 0x020 + +#define RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS 0x280 +#define RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_UNLOCK_AES1 RPC_AES1_CODE +#define RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_UNLOCK_DES RPC_DES_CODE +#define RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_UNLOCK_SHA RPC_SHA_CODE +#define RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_RESUME 0x010 + +#define RPC_CLEAR_GLOBAL_KEY_CONTEXT 0x2c0 +#define RPC_CLEAR_GLOBAL_KEY_CONTEXT_CLEARED_AES 0x001 +#define RPC_CLEAR_GLOBAL_KEY_CONTEXT_CLEARED_DES 0x002 + +#define ENABLE_CLOCK true +#define DISABLE_CLOCK false + +/*---------------------------------------------------------------------------*/ +/*RPC IN/OUT structures for CUS implementation */ +/*---------------------------------------------------------------------------*/ + +struct rpc_install_shortcut_lock_accelerator_out { + u32 shortcut_id; + u32 error; +}; + +struct rpc_install_shortcut_lock_accelerator_in { + u32 device_context_id; + u32 client_session; + u32 command_id; + u32 key_context; + /** + *The identifier of the HWA accelerator that this shortcut uses! + *Possible values are: + *- 1 (RPC_AES1_CODE) + *- 4 (RPC_DES_CODE) + *- 8 (RPC_SHA_CODE) + **/ + u32 hwa_id; + /** + *This field defines the algorithm, direction, mode, key size. + *It contains some of the bits of the corresponding "CTRL" register + *of the accelerator. + * + *More precisely: + *For AES1 accelerator, hwa_ctrl contains the following bits: + *- CTR (bit 6): + * when 1, selects CTR mode. 
+ * when 0, selects CBC or ECB mode (according to CBC bit) + *- CBC (bit 5) + * when 1, selects CBC mode (but only if CTR=0) + * when 0, selects EBC mode (but only if CTR=0) + *- DIRECTION (bit 2) + * 0: decryption + * 1: encryption + * + *For the DES2 accelerator, hwa_ctrl contains the following bits: + *- CBC (bit 4): 1 for CBC, 0 for ECB + *- DIRECTION (bit 2): 0 for decryption, 1 for encryption + * + *For the SHA accelerator, hwa_ctrl contains the following bits: + *- ALGO (bit 2:1): + * 0x0: MD5 + * 0x1: SHA1 + * 0x2: SHA-224 + * 0x3: SHA-256 + **/ + u32 hwa_ctrl; + union tf_crypto_operation_state operation_state; +}; + +struct rpc_lock_hwa_suspend_shortcut_out { + union tf_crypto_operation_state operation_state; +}; + +struct rpc_lock_hwa_suspend_shortcut_in { + u32 shortcut_id; +}; + +struct rpc_resume_shortcut_unlock_hwa_in { + u32 shortcut_id; + u32 aes1_key_context; + u32 reserved; + u32 des_key_context; + union tf_crypto_operation_state operation_state; +}; + +/*------------------------------------------------------------------------- */ +/* + * tf_get_device_context(struct cus_context *cus) + * search in the all the device context (connection_list) if the CUS context + * specified by cus exist. + * + * If it is found, return the device context where the CUS context is. + * If is is not found, return NULL. 
+ */ +static struct tf_connection *tf_get_device_context( + struct cus_context *cus) +{ + struct tf_connection *connection = NULL; + struct cus_context *cusFromList = NULL; + struct tf_device *dev = tf_get_device(); + + spin_lock(&(dev->connection_list_lock)); + list_for_each_entry(connection, &(dev->connection_list), + list) { + spin_lock(&(connection->shortcut_list_lock)); + list_for_each_entry(cusFromList, + &(connection->shortcut_list), list) { + if ((u32)cusFromList == (u32)cus) { + spin_unlock(&(connection-> + shortcut_list_lock)); + spin_unlock(&(dev-> + connection_list_lock)); + return connection; + } + } + spin_unlock(&(connection-> + shortcut_list_lock)); + } + spin_unlock(&(dev->connection_list_lock)); + + /*cus does not exist */ + return NULL; +} + +/*------------------------------------------------------------------------- */ +/* + * Get the shared memory from the memory block handle coming from secure. + * Return NULL if it does not exist. + */ +static struct tf_shmem_desc *tf_get_shmem_from_block_handle( + struct tf_connection *connection, u32 block) +{ + struct tf_shmem_desc *shmem_desc = NULL; + + mutex_lock(&(connection->shmem_mutex)); + + list_for_each_entry(shmem_desc, + &(connection->used_shmem_list), list) { + if ((u32) shmem_desc->block_identifier == + (u32) block) { + mutex_unlock(&(connection->shmem_mutex)); + return shmem_desc; + } + } + + /* block does not exist */ + mutex_unlock(&(connection->shmem_mutex)); + + return NULL; +} + +/*------------------------------------------------------------------------- */ +/* + * HWA public lock or unlock one HWA according algo specified by hwa_id + */ +void tf_crypto_lock_hwa(u32 hwa_id, bool do_lock) +{ + struct semaphore *s = NULL; + struct tf_device *dev = tf_get_device(); + + dprintk(KERN_INFO "[pid=%d] %s: hwa_id=0x%04X do_lock=%d\n", + current->pid, __func__, hwa_id, do_lock); + + switch (hwa_id) { + case RPC_AES1_CODE: + s = &dev->aes1_sema; + break; + case RPC_DES_CODE: + s = &dev->des_sema; + 
break; + default: + case RPC_SHA_CODE: + s = &dev->sha_sema; + break; + } + + if (do_lock == LOCK_HWA) { + dprintk(KERN_INFO "tf_crypto_lock_hwa: " + "Wait for HWAID=0x%04X\n", hwa_id); + while (down_trylock(s)) + cpu_relax(); + dprintk(KERN_INFO "tf_crypto_lock_hwa: " + "Locked on HWAID=0x%04X\n", hwa_id); + } else { + up(s); + dprintk(KERN_INFO "tf_crypto_lock_hwa: " + "Released for HWAID=0x%04X\n", hwa_id); + } +} + +/*------------------------------------------------------------------------- */ +/* + * HWAs public lock or unlock HWA's specified in the HWA H/A/D fields of RPC + * command rpc_command + */ +static void tf_crypto_lock_hwas(u32 rpc_command, bool do_lock) +{ + dprintk(KERN_INFO + "tf_crypto_lock_hwas: rpc_command=0x%08x do_lock=%d\n", + rpc_command, do_lock); + + /* perform the locks */ + if (rpc_command & RPC_AES1_CODE) + tf_crypto_lock_hwa(RPC_AES1_CODE, do_lock); + + if (rpc_command & RPC_DES_CODE) + tf_crypto_lock_hwa(RPC_DES_CODE, do_lock); + + if (rpc_command & RPC_SHA_CODE) + tf_crypto_lock_hwa(RPC_SHA_CODE, do_lock); +} + +/*------------------------------------------------------------------------- */ +/** + *Initialize the public crypto DMA channels, global HWA semaphores and handles + */ +u32 tf_crypto_init(void) +{ + struct tf_device *dev = tf_get_device(); + u32 error = PUBLIC_CRYPTO_OPERATION_SUCCESS; + + /* Initialize HWAs */ + tf_aes_init(); + tf_des_init(); + tf_digest_init(); + + /*initialize the HWA semaphores */ + sema_init(&dev->aes1_sema, 1); + sema_init(&dev->des_sema, 1); + sema_init(&dev->sha_sema, 1); + + /*initialize the current key handle loaded in the AESn/DES HWA */ + dev->aes1_key_context = 0; + dev->des_key_context = 0; + dev->sham1_is_public = false; + + /*initialize the DMA semaphores */ + mutex_init(&dev->sm.dma_mutex); + + /*allocate DMA buffer */ + dev->dma_buffer_length = PAGE_SIZE * 16; + dev->dma_buffer = dma_alloc_coherent(NULL, + dev->dma_buffer_length, + &(dev->dma_buffer_phys), + GFP_KERNEL); + if 
(dev->dma_buffer == NULL) { + printk(KERN_ERR + "tf_crypto_init: Out of memory for DMA buffer\n"); + error = S_ERROR_OUT_OF_MEMORY; + } + + return error; +} + +/*------------------------------------------------------------------------- */ +/* + *Initialize the device context CUS fields (shortcut semaphore and public CUS + *list) + */ +void tf_crypto_init_cus(struct tf_connection *connection) +{ + /*initialize the CUS list in the given device context */ + spin_lock_init(&(connection->shortcut_list_lock)); + INIT_LIST_HEAD(&(connection->shortcut_list)); +} + +/*------------------------------------------------------------------------- */ +/** + *Terminate the public crypto (including DMA) + */ +void tf_crypto_terminate(void) +{ + struct tf_device *dev = tf_get_device(); + + if (dev->dma_buffer != NULL) { + dma_free_coherent(NULL, dev->dma_buffer_length, + dev->dma_buffer, + dev->dma_buffer_phys); + dev->dma_buffer = NULL; + } + + tf_digest_exit(); + tf_des_exit(); + tf_aes_exit(); +} + +/*------------------------------------------------------------------------- */ +/* + *Perform a crypto update operation. 
+ *THIS FUNCTION IS CALLED FROM THE IOCTL + */ +static bool tf_crypto_update( + struct cus_context *cus, + struct cus_params *params) +{ + bool status = true; + dprintk(KERN_INFO + "tf_crypto_update(%x): "\ + "HWAID=0x%x, In=%p, Out=%p, Len=%u\n", + (uint32_t) cus, cus->hwa_id, + params->input_data, + params->output_data, params->input_data_length); + + /* Enable the clock and Process Data */ + switch (cus->hwa_id) { + case RPC_AES1_CODE: + tf_crypto_enable_clock(PUBLIC_CRYPTO_AES1_CLOCK_REG); + cus->operation_state.aes.key_is_public = 0; + cus->operation_state.aes.CTRL = cus->hwa_ctrl; + status = tf_aes_update( + &cus->operation_state.aes, + params->input_data, + params->output_data, + params->input_data_length / AES_BLOCK_SIZE); + tf_crypto_disable_clock(PUBLIC_CRYPTO_AES1_CLOCK_REG); + break; + + case RPC_DES_CODE: + tf_crypto_enable_clock(PUBLIC_CRYPTO_DES3DES_CLOCK_REG); + status = tf_des_update( + cus->hwa_ctrl, + &cus->operation_state.des, + params->input_data, + params->output_data, + params->input_data_length / DES_BLOCK_SIZE); + tf_crypto_disable_clock(PUBLIC_CRYPTO_DES3DES_CLOCK_REG); + break; + + case RPC_SHA_CODE: + tf_crypto_enable_clock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG); + cus->operation_state.sha.CTRL = cus->hwa_ctrl; + status = tf_digest_update( + &cus->operation_state.sha, + params->input_data, + params->input_data_length); + tf_crypto_disable_clock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG); + break; + + default: + BUG_ON(1); + break; + } + + dprintk(KERN_INFO "tf_crypto_update: Done\n"); + return status; +} + +/*------------------------------------------------------------------------- */ + +/* + *Check if the command must be intercepted by a CUS or not. + *THIS FUNCTION IS CALLED FROM THE USER THREAD (ioctl). 
+ * + *inputs: struct tf_connection *connection : current device context + * tf_command_invoke_client_command *command : the command + * bool incrementuse_count : specify if the use_count must be incremented + *output: + * struct cus_context **cus_ctx : the public CUS + * if it is shortcuted + *return: true or false + * + */ +static bool tf_crypto_is_shortcuted_command( + struct tf_connection *connection, + struct tf_command_invoke_client_command *command, + struct cus_context **cus_ctx, + bool incrementuse_count) +{ + struct tf_device *dev = tf_get_device(); + struct cus_context *cus = NULL; + *cus_ctx = NULL; + + dprintk(KERN_INFO "tf_crypto_is_shortcuted_command: "\ + "connection=0x%08x, command=0x%08x, "\ + "CltSession=0x%08x, CmdID=0x%08x\n", + (uint32_t) connection, (uint32_t) command, + (uint32_t) command->client_session, + command->client_command_identifier); + + /*take shortcut_list_lock for the device context + *in which the message is sent <=> make sure that nobody is + *going to change data while processing */ + spin_lock(&(connection->shortcut_list_lock)); + + /*lookup in the list of shortcuts attached to the device context for a + *shortcut context that contains the same client_session as the command + *and such that command_id is equal to client_command_identifier of the + *INVOKE_CLIENT_COMMAND message. 
If no such shortcut exists, take the + *standard path */ + list_for_each_entry( + cus, &(connection->shortcut_list), list) { + dprintk(KERN_INFO + "tf_crypto_is_shortcuted_command: "\ + "command_id = 0x%08x client_session = 0x%08x\n", + cus->command_id, cus->client_session); + + if ((cus->client_session == command->client_session) + && + (cus->command_id == command-> + client_command_identifier)) { + dprintk(KERN_INFO + "tf_crypto_is_shortcuted_command: "\ + "shortcut is identified\n"); + /*find a CUS : check if is suspended or not */ + if (cus->suspended) { + /* + * suspended of the shortcut context is set to + * true, it means that the secure world has + * suspended the shortcut to perform an update + * on its own. In this case, take the standard + * path. This should happen very rarely because + * the client and the service should generally + * communicate to avoid such a collision + */ + dprintk(KERN_INFO "shortcut exists but "\ + "suspended\n"); + goto command_not_shortcutable; + + } else { + dprintk(KERN_INFO "shortcut exists\n"); + /*For AES and DES/3DES operations, + *provisionally determine if the accelerator + *is loaded with the appropriate key before + *deciding to enter the accelerator critical + *section. In most cases, if some other thread + *or the secure world is currently using the + *accelerator, the key won't change. + *So, if the key doesn't match now, it is + *likely not to match later on, so we'd better + *not try to enter the critical section in this + *case: */ + + if (cus->hwa_id == RPC_AES1_CODE && + cus-> + key_context != dev-> + aes1_key_context) { + /*For AES operations, atomically read + *g_hAES1SSecureKeyContext and check if + *it is equal to key_context. 
If not, + *take the standard path <=> do not + *shortcut */ + dprintk(KERN_INFO + "shortcut exists but AES key "\ + "not correct\nkey_context="\ + "0x%08x vs 0x%08x\n", + cus->key_context, + dev-> + aes1_key_context); + goto command_not_shortcutable; + + } else if (cus->hwa_id == RPC_DES_CODE + && cus->key_context != + dev-> + des_key_context) { + /* + * For DES/3DES atomically read + * des_key_context and check if + * it is equal to key_context. If not, + * take the standard path <=> do not + * shortcut + */ + dprintk(KERN_INFO + "shortcut exists but DES key " + "not correct " + "des_key_context = 0x%08x" + " key_context0x%08x\n", + (u32)dev-> + des_key_context, + (u32)cus->key_context); + goto command_not_shortcutable; + } else if (cus->hwa_id == RPC_SHA_CODE + && !dev->sham1_is_public) { + /* + * For digest operations, atomically + * read sham1_is_public and check if it + * is true. If not, no shortcut. + */ + dprintk(KERN_INFO + "shortcut exists but SHAM1 " + "is not accessible in public"); + goto command_not_shortcutable; + } + } + + dprintk(KERN_INFO "shortcut exists and enable\n"); + + /*Shortcut has been found and context fits with + *thread => YES! the command can be shortcuted */ + + /* + *set the pointer on the corresponding session + *(eq CUS context) + */ + *cus_ctx = cus; + + /* + *increment use_count if required + */ + if (incrementuse_count) + cus->use_count++; + + /* + *release shortcut_list_lock + */ + spin_unlock(&(connection-> + shortcut_list_lock)); + return true; + } + } + + command_not_shortcutable: + /* + *release shortcut_list_lock + */ + spin_unlock(&(connection->shortcut_list_lock)); + *cus_ctx = NULL; + return false; +} + +/*------------------------------------------------------------------------- */ +/* + * Pre-process the client command (crypto update operation), i.e., parse the + * command message (decode buffers, etc.) THIS FUNCTION IS CALLED FROM THE USER + * THREAD (ioctl). 
+ * + * For incorrect messages, an error is returned and the message will be sent to + * secure + */ +static bool tf_crypto_parse_command_message(struct tf_connection *connection, + struct cus_context *cus, + struct tf_command_invoke_client_command *command, + struct cus_params *params) +{ + u32 param_type; + u32 input_data_length; + u32 output_data_length; + u8 *input_data; + u8 *output_data; + struct tf_shmem_desc *input_shmem = NULL; + struct tf_shmem_desc *output_shmem = NULL; + + dprintk(KERN_INFO + "tf_crypto_parse_command_message(%p) : Session=0x%x\n", + cus, cus->client_session); + + if (command->params[0].temp_memref.size == 0) + return false; + + param_type = TF_GET_PARAM_TYPE(command->param_types, 0); + switch (param_type) { + case TF_PARAM_TYPE_MEMREF_TEMP_INPUT: + if (command->params[0].temp_memref.descriptor == 0) + return false; + + input_data = (u8 *) command->params[0].temp_memref. + descriptor; + input_data_length = command->params[0].temp_memref.size; + + break; + + case TF_PARAM_TYPE_MEMREF_INPUT: + input_shmem = tf_get_shmem_from_block_handle(connection, + command->params[0].memref.block); + + if (input_shmem == NULL) + return false; + atomic_inc(&input_shmem->ref_count); + + input_data = input_shmem->pBuffer + + command->params[0].memref.offset; + input_data_length = command->params[0].memref.size; + + break; + + default: + return false; + } + + if (cus->hwa_id != RPC_SHA_CODE) { + if (command->params[1].temp_memref.size == 0) + goto err0; + + /* We need an output buffer as well */ + param_type = TF_GET_PARAM_TYPE(command->param_types, 1); + switch (param_type) { + case TF_PARAM_TYPE_MEMREF_TEMP_OUTPUT: + output_data = + (u8 *) command->params[1].temp_memref. 
+ descriptor; + output_data_length = + command->params[1].temp_memref.size; + + break; + + case TF_PARAM_TYPE_MEMREF_OUTPUT: + if (command->params[1].temp_memref.descriptor == 0) + return false; + + output_shmem = tf_get_shmem_from_block_handle( + connection, command->params[1].memref.block); + if (output_shmem == NULL) + goto err0; + atomic_inc(&output_shmem->ref_count); + + output_data = output_shmem->pBuffer + + command->params[1].memref.offset; + output_data_length = command->params[1].memref.size; + + break; + + default: + dprintk(KERN_ERR "tf_crypto_parse_command_message: " + "Encrypt/decrypt operations require an output " + "buffer\n"); + + goto err0; + } + + if (output_data_length < input_data_length) { + dprintk(KERN_ERR "tf_crypto_parse_command_message: " + "Short buffer: output_data_length = %d < " + "input_data_length = %d\n", + output_data_length, input_data_length); + goto err1; + } + } else { + output_data_length = 0; + output_data = NULL; + } + + /* + * Check if input length is compatible with the algorithm of the + * shortcut + */ + switch (cus->hwa_id) { + case RPC_AES1_CODE: + /* Must be multiple of the AES block size */ + if ((input_data_length % AES_BLOCK_SIZE) != 0) { + dprintk(KERN_ERR + "tf_crypto_parse_command_message(%p): "\ + "Input Data Length invalid [%d] for AES\n", + cus, input_data_length); + goto err1; + } + break; + case RPC_DES_CODE: + /* Must be multiple of the DES block size */ + if ((input_data_length % DES_BLOCK_SIZE) != 0) { + dprintk(KERN_ERR + "tf_crypto_parse_command_message(%p): "\ + "Input Data Length invalid [%d] for DES\n", + cus, input_data_length); + goto err1; + } + break; + default: + /* SHA operation: no constraint on data length */ + break; + } + + params->input_data = input_data; + params->input_data_length = input_data_length; + params->input_shmem = input_shmem; + params->output_data = output_data; + params->output_data_length = output_data_length; + params->output_shmem = output_shmem; + + return true; + 
+err1: + if (output_shmem) + atomic_dec(&output_shmem->ref_count); +err0: + if (input_shmem) + atomic_dec(&input_shmem->ref_count); + + return false; +} + +/*------------------------------------------------------------------------- */ + +/* + *Post-process the client command (crypto update operation), + *i.e. copy the result into the user output buffer and release the resources. + *THIS FUNCTION IS CALLED FROM THE USER THREAD (ioctl). + */ +static void tf_crypto_write_answer( + struct cus_context *cus, + struct cus_params *params, + struct tf_answer_invoke_client_command *answer) +{ + u32 error = S_SUCCESS; + + dprintk(KERN_INFO + "tf_crypto_write_answer(%p) : Session=0x%x\n", + cus, cus->client_session); + + /* Generate the answer */ + answer->message_size = + (sizeof(struct tf_answer_invoke_client_command) - + sizeof(struct tf_answer_header)) / 4; + answer->message_type = TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND; + answer->error_origin = TF_ORIGIN_TRUSTED_APP; + answer->operation_id = 0; + answer->error_code = error; + answer->answers[1].size.size = params->output_data_length; +} + +/*------------------------------------------------------------------------- */ + +int tf_crypto_try_shortcuted_update(struct tf_connection *connection, + struct tf_command_invoke_client_command *command, + struct tf_answer_invoke_client_command *answer) +{ + struct cus_context *cus = NULL; + + if (tf_crypto_is_shortcuted_command(connection, + (struct tf_command_invoke_client_command *) command, + &cus, false)) { + u32 hwa_id = cus->hwa_id; + + /* Lock HWA */ + tf_crypto_lock_hwa(hwa_id, LOCK_HWA); + + if (tf_crypto_is_shortcuted_command(connection, + command, + &cus, true)) { + struct cus_params cus_params; + + memset(&cus_params, 0, sizeof(cus_params)); + + if (!tf_crypto_parse_command_message( + connection, + cus, + command, + &cus_params)) { + /* Decrement CUS context use count */ + cus->use_count--; + + /* Release HWA lock */ + tf_crypto_lock_hwa(cus->hwa_id, + UNLOCK_HWA); + + 
return -1; + } + + /* Perform the update in public <=> THE shortcut */ + if (!tf_crypto_update(cus, &cus_params)) { + /* Decrement CUS context use count */ + cus->use_count--; + + /* Release HWA lock */ + tf_crypto_lock_hwa(cus->hwa_id, + UNLOCK_HWA); + + return -1; + } + + /* Write answer message */ + tf_crypto_write_answer(cus, + &cus_params, answer); + + /* Decrement registered shmems use count if needed */ + if (cus_params.input_shmem) + atomic_dec(&cus_params.input_shmem->ref_count); + if (cus_params.output_shmem) + atomic_dec(&cus_params.output_shmem->ref_count); + + /* Decrement CUS context use count */ + cus->use_count--; + + tf_crypto_lock_hwa(cus->hwa_id, + UNLOCK_HWA); + } else { + tf_crypto_lock_hwa(hwa_id, UNLOCK_HWA); + return -1; + } + } else { + return -1; + } + + return 0; +} + +/*------------------------------------------------------------------------- */ + +void tf_crypto_wait_for_ready_bit_infinitely(u32 *reg, u32 bit) +{ + while (!(INREG32(reg) & bit)) + ; +} + +/*------------------------------------------------------------------------- */ + +u32 tf_crypto_wait_for_ready_bit(u32 *reg, u32 bit) +{ + u32 timeoutCounter = PUBLIC_CRYPTO_TIMEOUT_CONST; + + while ((!(INREG32(reg) & bit)) && ((--timeoutCounter) != 0)) + ; + + if (timeoutCounter == 0) + return PUBLIC_CRYPTO_ERR_TIMEOUT; + + return PUBLIC_CRYPTO_OPERATION_SUCCESS; +} + +/*------------------------------------------------------------------------- */ + +static DEFINE_SPINLOCK(clk_lock); + +void tf_crypto_disable_clock(uint32_t clock_paddr) +{ + u32 *clock_reg; + u32 val; + unsigned long flags; + + dprintk(KERN_INFO "tf_crypto_disable_clock: " \ + "clock_paddr=0x%08X\n", + clock_paddr); + + /* Ensure none concurrent access when changing clock registers */ + spin_lock_irqsave(&clk_lock, flags); + + clock_reg = (u32 *)IO_ADDRESS(clock_paddr); + + val = __raw_readl(clock_reg); + val &= ~(0x3); + __raw_writel(val, clock_reg); + + /* Wait for clock to be fully disabled */ + while 
((__raw_readl(clock_reg) & 0x30000) == 0) + ; + + spin_unlock_irqrestore(&clk_lock, flags); + + tf_l4sec_clkdm_allow_idle(true); +} + +/*------------------------------------------------------------------------- */ + +void tf_crypto_enable_clock(uint32_t clock_paddr) +{ + u32 *clock_reg; + u32 val; + unsigned long flags; + + dprintk(KERN_INFO "tf_crypto_enable_clock: " \ + "clock_paddr=0x%08X\n", + clock_paddr); + + tf_l4sec_clkdm_wakeup(true); + + /* Ensure none concurrent access when changing clock registers */ + spin_lock_irqsave(&clk_lock, flags); + + clock_reg = (u32 *)IO_ADDRESS(clock_paddr); + + val = __raw_readl(clock_reg); + val |= 0x2; + __raw_writel(val, clock_reg); + + /* Wait for clock to be fully enabled */ + while ((__raw_readl(clock_reg) & 0x30000) != 0) + ; + + spin_unlock_irqrestore(&clk_lock, flags); +} + +/*------------------------------------------------------------------------- */ +/* CUS RPCs */ +/*------------------------------------------------------------------------- */ +/* + * This RPC is used by the secure world to install a new shortcut. Optionally, + * for AES or DES/3DES operations, it can also lock the accelerator so that the + * secure world can install a new key in it. 
+ */ +static int tf_crypto_install_shortcut_lock_hwa( + u32 rpc_command, void *rpc_shared_buffer) +{ + struct cus_context *cus = NULL; + struct tf_connection *connection = NULL; + + /* Reference the input/ouput data */ + struct rpc_install_shortcut_lock_accelerator_out *install_cus_out = + rpc_shared_buffer; + struct rpc_install_shortcut_lock_accelerator_in *install_cus_in = + rpc_shared_buffer; + + dprintk(KERN_INFO "tf_crypto_install_shortcut_lock_hwa: " + "rpc_command=0x%08x; hwa_id=0x%08x\n", + rpc_command, install_cus_in->hwa_id); + + connection = (struct tf_connection *) + install_cus_in->device_context_id; + + if (connection == NULL) { + dprintk(KERN_INFO + "tf_crypto_install_shortcut_lock_hwa: " + "DeviceContext 0x%08x does not exist, " + "cannot create Shortcut\n", + install_cus_in->device_context_id); + install_cus_out->error = -1; + return 0; + } + + /* + * Allocate a shortcut context. If the allocation fails, + * return S_ERROR_OUT_OF_MEMORY error code + */ + cus = (struct cus_context *) + internal_kmalloc(sizeof(*cus), GFP_KERNEL); + if (cus == NULL) { + dprintk(KERN_ERR + "tf_crypto_install_shortcut_lock_hwa: "\ + "Out of memory for public session\n"); + install_cus_out->error = S_ERROR_OUT_OF_MEMORY; + return 0; + } + + memset(cus, 0, sizeof(*cus)); + + /*setup the shortcut */ + cus->magic_number = CUS_CONTEXT_MAGIC; + cus->client_session = install_cus_in->client_session; + cus->command_id = install_cus_in->command_id; + cus->hwa_id = install_cus_in->hwa_id; + cus->hwa_ctrl = install_cus_in->hwa_ctrl; + cus->key_context = install_cus_in->key_context; + cus->use_count = 0; + cus->suspended = false; + + memcpy(&cus->operation_state, + &install_cus_in->operation_state, + sizeof(union tf_crypto_operation_state)); + + /*lock the shortcut_list_lock for this device context */ + spin_lock(&connection->shortcut_list_lock); + + /*Insert the shortcut in the list of shortcuts in the device context */ + list_add(&(cus->list), &(connection->shortcut_list)); + + 
/*release shortcut_list_lock */ + spin_unlock(&connection->shortcut_list_lock); + + /*fill the output structure */ + install_cus_out->shortcut_id = (u32) cus; + install_cus_out->error = S_SUCCESS; + + /*If the L bit is true, then: + * Enter the accelerator critical section. If an update is currently in + * progress on the accelerator (using g_hXXXKeyContext key), this will + * wait until the update has completed. This is call when secure wants + * to install a key in HWA, once it is done secure world will release + * the lock. For SHA (activate shortcut is always called without LOCK + * fag):do nothing + */ + if ((rpc_command & RPC_INSTALL_SHORTCUT_LOCK_ACCELERATOR_LOCK) != 0) { + /*Lock the HWA */ + tf_crypto_lock_hwa(cus->hwa_id, LOCK_HWA); + } + + dprintk(KERN_INFO + "tf_crypto_install_shortcut_lock_hwa: Done\n"); + + return S_SUCCESS; +} + +/*------------------------------------------------------------------------- */ + +/* + * This RPC is used to perform one or several of the following operations + * - Lock one or several accelerators for the exclusive use by the secure world, + * either because it is going to be switched to secure or because a new key is + * going to be loaded in the accelerator + * - Suspend a shortcut, i.e., make it temporarily unavailable to the public + * world. This is used when a secure update is going to be performed on the + * operation. The answer to the RPC then contains the operation state + * necessary for the secure world to do the update. 
+ * - Uninstall the shortcut + */ +static int tf_crypto_lock_hwas_suspend_shortcut( + u32 rpc_command, void *rpc_shared_buffer) +{ + u32 target_shortcut; + struct cus_context *cus = NULL; + struct tf_connection *connection = NULL; + + /*reference the input/ouput data */ + struct rpc_lock_hwa_suspend_shortcut_out *suspend_cus_out = + rpc_shared_buffer; + struct rpc_lock_hwa_suspend_shortcut_in *suspend_cus_in = + rpc_shared_buffer; + + dprintk(KERN_INFO + "tf_crypto_lock_hwas_suspend_shortcut: "\ + "suspend_cus_in=0x%08x; shortcut_id=0x%08x\n", + suspend_cus_in->shortcut_id, (u32)suspend_cus_in); + + target_shortcut = suspend_cus_in->shortcut_id; + + /*lock HWAs */ + tf_crypto_lock_hwas(rpc_command, LOCK_HWA); + + /*if suspend_cus_in->shortcut_id != 0 and if rpc_command.S != 0, + then, suspend shortcut */ + if ((target_shortcut != 0) && ((rpc_command & + RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_SUSPEND) != 0)) { + /*reference the CUSContext */ + cus = (struct cus_context *) + suspend_cus_in->shortcut_id; + + /*preventive check1: return if shortcut does not exist */ + connection = tf_get_device_context(cus); + if (connection == NULL) { + dprintk(KERN_INFO + "tf_crypto_lock_hwas_suspend_shortcut: "\ + "shortcut_id=0x%08x does not exist, cannot suspend "\ + "Shortcut\n", + suspend_cus_in->shortcut_id); + return -1; + } + +loop_on_suspend: + /*lock shortcut_list_lock associated with the + *device context */ + spin_lock(&connection->shortcut_list_lock); + + /*Suspend shortcut */ + cus->suspended = true; + + if (cus->use_count != 0) { + /*release shortcut_list_lock */ + spin_unlock(&connection-> + shortcut_list_lock); + schedule(); + goto loop_on_suspend; + } + + /*Copy the operation state data stored in CUS Context into the + *answer to the RPC output assuming that HWA register has been + *saved at update time */ + memcpy(&suspend_cus_out->operation_state, + &cus->operation_state, + sizeof(union tf_crypto_operation_state)); + + /*Uninstall shortcut if requiered */ + if 
((rpc_command & + RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_UNINSTALL) != 0) { + dprintk(KERN_INFO + "tf_crypto_lock_hwas_suspend_shortcut:"\ + "Uninstall 0x%08x\n", + target_shortcut); + list_del(&(cus->list)); + /*list_del only remove the item in the list, the + *memory must be free afterward */ + /*release the lock before calling internal_kfree */ + spin_unlock(&connection-> + shortcut_list_lock); + if (cus != NULL) + internal_kfree(cus); + return 0; + } + + /*release shortcut_list_lock */ + spin_unlock(&connection->shortcut_list_lock); + } + + return 0; +} + +/*------------------------------------------------------------------------- */ + +/* + * This RPC is used to perform one or several of the following operations: + * - Resume a shortcut previously suspended + * - Inform the public driver of the new keys installed in the DES and AES + * accelerators + * - Unlock some of the accelerators + */ +static int tf_crypto_resume_shortcut_unlock_hwas( + u32 rpc_command, void *rpc_shared_buffer) +{ + struct tf_device *dev = tf_get_device(); + struct tf_connection *connection = NULL; + struct cus_context *cus = NULL; + + /*reference the input data */ + struct rpc_resume_shortcut_unlock_hwa_in *resume_cus_in = + rpc_shared_buffer; + + dprintk(KERN_INFO + "tf_crypto_resume_shortcut_unlock_hwas\n" + "rpc_command=0x%08x\nshortcut_id=0x%08x\n", + rpc_command, resume_cus_in->shortcut_id); + + /*if shortcut_id not 0 resume the shortcut and unlock HWA + else only unlock HWA */ + if (resume_cus_in->shortcut_id != 0) { + /*reference the CUSContext */ + cus = (struct cus_context *) + resume_cus_in->shortcut_id; + + /*preventive check1: return if shortcut does not exist + *else, points to the public crypto monitor (inside the device + *context) */ + connection = tf_get_device_context(cus); + if (connection == NULL) { + dprintk(KERN_INFO + "tf_crypto_resume_shortcut_unlock_hwas(...):"\ + "shortcut_id 0x%08x does not exist, cannot suspend "\ + "Shortcut\n", + 
resume_cus_in->shortcut_id); + return -1; + } + + /*if S set and shortcut not yet suspended */ + if ((cus->suspended) && + ((rpc_command & + RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_RESUME) != 0)){ + /*Write operation_stateData in the shortcut context */ + memcpy(&cus->operation_state, + &resume_cus_in->operation_state, + sizeof(union tf_crypto_operation_state)); + /*resume the shortcut */ + cus->suspended = false; + } + } + + /* + * If A is set: Atomically set aes1_key_context to + * aes1_key_context + */ + if ((rpc_command & + RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_UNLOCK_AES1) != 0) { + dev->aes1_key_context = + resume_cus_in->aes1_key_context; + } + + /* + * If D is set: + * Atomically set des_key_context to des_key_context + */ + if ((rpc_command & + RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_UNLOCK_DES) != 0) { + dev->des_key_context = + resume_cus_in->des_key_context; + } + + /* H is never set by the PA: Atomically set sham1_is_public to true */ + dev->sham1_is_public = true; + + /* Unlock HWAs according rpc_command */ + tf_crypto_lock_hwas(rpc_command, UNLOCK_HWA); + + return 0; +} + +/*------------------------------------------------------------------------- */ + +/* + * This RPC is used to notify the public driver that the key in the AES, DES + * accelerators has been cleared. This happens only when the key is no longer + * referenced by any shortcuts. So, it is guaranteed that no-one has entered the + * accelerators critical section and there is no need to enter it to implement + * this RPC. 
+ */ +static int tf_crypto_clear_global_key_context( + u32 rpc_command, void *rpc_shared_buffer) +{ + struct tf_device *dev = tf_get_device(); + + /* + * If A is set: Atomically set aes1_key_context to 0 + */ + if ((rpc_command & + RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_UNLOCK_AES1) != 0) { + dev->aes1_key_context = 0; + } + + /* + *If D is set: Atomically set des_key_context to 0 + */ + if ((rpc_command & + RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_UNLOCK_DES) != 0) { + dev->des_key_context = 0; + } + + return 0; +} + +/*------------------------------------------------------------------------- */ +/* + * Execute a public crypto related RPC + */ + +int tf_crypto_execute_rpc(u32 rpc_command, void *rpc_shared_buffer) +{ + switch (rpc_command & RPC_CRYPTO_COMMAND_MASK) { + case RPC_INSTALL_SHORTCUT_LOCK_ACCELERATOR: + dprintk(KERN_INFO "RPC_INSTALL_SHORTCUT_LOCK_ACCELERATOR\n"); + return tf_crypto_install_shortcut_lock_hwa( + rpc_command, rpc_shared_buffer); + + case RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT: + dprintk(KERN_INFO "RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT\n"); + return tf_crypto_lock_hwas_suspend_shortcut( + rpc_command, rpc_shared_buffer); + + case RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS: + dprintk(KERN_INFO "RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS\n"); + return tf_crypto_resume_shortcut_unlock_hwas( + rpc_command, rpc_shared_buffer); + + case RPC_CLEAR_GLOBAL_KEY_CONTEXT: + dprintk(KERN_INFO "RPC_CLEAR_GLOBAL_KEY_CONTEXT\n"); + return tf_crypto_clear_global_key_context( + rpc_command, rpc_shared_buffer); + } + + return -1; +} diff --git a/security/smc/omap4/scx_public_crypto.h b/security/smc/tf_crypto.h index 984cb18..2291439 100644 --- a/security/smc/omap4/scx_public_crypto.h +++ b/security/smc/tf_crypto.h @@ -1,34 +1,39 @@ -/* - * Copyright (c)2006-2008 Trusted Logic S.A. +/** + * Copyright (c) 2011 Trusted Logic S.A. * All Rights Reserved. 
* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place, Suite 330, Boston, MA 02111-1307 USA + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA */ -#ifndef __SCX_PUBLIC_CRYPTO_H -#define __SCX_PUBLIC_CRYPTO_H +#ifndef __TF_PUBLIC_CRYPTO_H +#define __TF_PUBLIC_CRYPTO_H -#include "scxlnx_defs.h" +#include "tf_defs.h" #include <linux/io.h> #include <mach/io.h> #include <clockdomain.h> +#ifdef __ASM_ARM_ARCH_OMAP_CLOCKDOMAIN_H +#define clkdm_wakeup omap2_clkdm_wakeup +#define clkdm_allow_idle omap2_clkdm_allow_idle +#endif + /*-------------------------------------------------------------------------- */ #define PUBLIC_CRYPTO_HWA_AES1 0x1 -#define PUBLIC_CRYPTO_HWA_AES2 0x2 #define PUBLIC_CRYPTO_HWA_DES 0x4 #define PUBLIC_CRYPTO_HWA_SHA 0x8 @@ -39,7 +44,6 @@ #define PUBLIC_CRYPTO_CLKSTCTRL_CLOCK_REG 0x4A009580 #define 
PUBLIC_CRYPTO_AES1_CLOCK_REG 0x4A0095A0 -#define PUBLIC_CRYPTO_AES2_CLOCK_REG 0x4A0095A8 #define PUBLIC_CRYPTO_DES3DES_CLOCK_REG 0x4A0095B0 #define PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG 0x4A0095C8 @@ -110,14 +114,14 @@ /* *The magic word. */ -#define CRYPTOKI_UPDATE_SHORTCUT_CONTEXT_MAGIC 0x45EF683C +#define CUS_CONTEXT_MAGIC 0x45EF683C /*-------------------------------------------------------------------------- */ /* CUS context structure */ /*-------------------------------------------------------------------------- */ /* State of an AES operation */ -struct PUBLIC_CRYPTO_AES_OPERATION_STATE { +struct tf_crypto_aes_operation_state { u32 AES_IV_0; u32 AES_IV_1; u32 AES_IV_2; @@ -138,14 +142,14 @@ struct PUBLIC_CRYPTO_AES_OPERATION_STATE { u32 key_is_public; }; -struct PUBLIC_CRYPTO_DES_OPERATION_STATE { +struct tf_crypto_des_operation_state { u32 DES_IV_L; u32 DES_IV_H; }; #define HASH_BLOCK_BYTES_LENGTH 64 -struct PUBLIC_CRYPTO_SHA_OPERATION_STATE { +struct tf_crypto_sha_operation_state { /* Current digest */ u32 SHA_DIGEST_A; u32 SHA_DIGEST_B; @@ -157,63 +161,60 @@ struct PUBLIC_CRYPTO_SHA_OPERATION_STATE { u32 SHA_DIGEST_H; /* This buffer contains a partial chunk */ - u8 pChunkBuffer[HASH_BLOCK_BYTES_LENGTH]; + u8 chunk_buffer[HASH_BLOCK_BYTES_LENGTH]; - /* Number of bytes stored in pChunkBuffer (0..64) */ - u32 nChunkLength; + /* Number of bytes stored in chunk_buffer (0..64) */ + u32 chunk_length; /* * Total number of bytes processed so far * (not including the partial chunk) */ - u32 nBytesProcessed; + u32 bytes_processed; u32 CTRL; }; -union PUBLIC_CRYPTO_OPERATION_STATE { - struct PUBLIC_CRYPTO_AES_OPERATION_STATE aes; - struct PUBLIC_CRYPTO_DES_OPERATION_STATE des; - struct PUBLIC_CRYPTO_SHA_OPERATION_STATE sha; +union tf_crypto_operation_state { + struct tf_crypto_aes_operation_state aes; + struct tf_crypto_des_operation_state des; + struct tf_crypto_sha_operation_state sha; }; /* *Fully describes a public crypto operation *(i.e., an operation that has a 
shortcut attached). */ -struct CRYPTOKI_UPDATE_SHORTCUT_CONTEXT { +struct cus_context { /* *Identifies the public crypto operation in the list of all public *operations. */ struct list_head list; - u32 nMagicNumber; /*Must be set to - *{CRYPTOKI_UPDATE_SHORTCUT_CONTEXT_MAGIC} */ + u32 magic_number; /*Must be set to + *{CUS_CONTEXT_MAGIC} */ /*basic fields */ - u32 hClientSession; - u32 nCommandID; - u32 nHWAID; - u32 nHWA_CTRL; - u32 hKeyContext; - union PUBLIC_CRYPTO_OPERATION_STATE sOperationState; - u32 nUseCount; - bool bSuspended; + u32 client_session; + u32 command_id; + u32 hwa_id; + u32 hwa_ctrl; + u32 key_context; + union tf_crypto_operation_state operation_state; + u32 use_count; + bool suspended; }; -struct CRYPTOKI_UPDATE_PARAMS { +struct cus_params { /*fields for data processing of an update command */ - u32 nInputDataLength; - u8 *pInputData; - struct SCXLNX_SHMEM_DESC *pInputShmem; - - u32 nResultDataLength; - u8 *pResultData; - struct SCXLNX_SHMEM_DESC *pOutputShmem; + u32 input_data_length; + u8 *input_data; + struct tf_shmem_desc *input_shmem; - u8 *pS2CDataBuffer; - u32 nS2CDataBufferMaxLength; + u32 output_data_length; + u8 *output_data; + struct tf_shmem_desc *output_shmem; }; /*-------------------------------------------------------------------------- */ @@ -224,46 +225,46 @@ struct CRYPTOKI_UPDATE_PARAMS { /* *Initialize the public crypto DMA chanels and global HWA semaphores */ -u32 SCXPublicCryptoInit(void); +u32 tf_crypto_init(void); /* *Initialize the device context CUS fields *(shortcut semaphore and public CUS list) */ -void SCXPublicCryptoInitDeviceContext(struct SCXLNX_CONNECTION *pDeviceContext); +void tf_crypto_init_cus(struct tf_connection *connection); /** *Terminate the public crypto (including DMA) */ -void SCXPublicCryptoTerminate(void); +void tf_crypto_terminate(void); -int SCXPublicCryptoTryShortcutedUpdate(struct SCXLNX_CONNECTION *pConn, - struct SCX_COMMAND_INVOKE_CLIENT_COMMAND *pMessage, - struct 
SCX_ANSWER_INVOKE_CLIENT_COMMAND *pAnswer); +int tf_crypto_try_shortcuted_update(struct tf_connection *connection, + struct tf_command_invoke_client_command *command, + struct tf_answer_invoke_client_command *answer); -int SCXPublicCryptoExecuteRPCCommand(u32 nRPCCommand, void *pRPCSharedBuffer); +int tf_crypto_execute_rpc(u32 rpc_command, void *rpc_shared_buffer); /*-------------------------------------------------------------------------- */ /* *Helper methods */ -u32 SCXPublicCryptoWaitForReadyBit(u32 *pRegister, u32 vBit); -void SCXPublicCryptoWaitForReadyBitInfinitely(u32 *pRegister, u32 vBit); +u32 tf_crypto_wait_for_ready_bit(u32 *reg, u32 bit); +void tf_crypto_wait_for_ready_bit_infinitely(u32 *reg, u32 bit); -void SCXPublicCryptoEnableClock(uint32_t vClockPhysAddr); -void SCXPublicCryptoDisableClock(uint32_t vClockPhysAddr); +void tf_crypto_enable_clock(uint32_t clock_paddr); +void tf_crypto_disable_clock(uint32_t clock_paddr); #define LOCK_HWA true #define UNLOCK_HWA false -void PDrvCryptoLockUnlockHWA(u32 nHWAID, bool bDoLock); +void tf_crypto_lock_hwa(u32 hwa_id, bool do_lock); /*---------------------------------------------------------------------------*/ /* AES operations */ /*---------------------------------------------------------------------------*/ -void PDrvCryptoAESInit(void); -void PDrvCryptoAESExit(void); +void tf_aes_init(void); +void tf_aes_exit(void); #ifdef CONFIG_SMC_KERNEL_CRYPTO int register_smc_public_crypto_aes(void); @@ -283,21 +284,21 @@ static inline void unregister_smc_public_crypto_aes(void) {} *The AES1 accelerator is assumed loaded with the correct key * *AES_CTRL: defines the mode and direction - *pAESState: defines the operation IV - *pSrc: Input buffer to process. - *pDest: Output buffer containing the processed data. + *aes_state: defines the operation IV + *src: Input buffer to process. + *dest: Output buffer containing the processed data. * - *nbBlocks number of block(s)to process. 
+ *nb_blocks number of block(s)to process. */ -bool PDrvCryptoUpdateAES(struct PUBLIC_CRYPTO_AES_OPERATION_STATE *pAESState, - u8 *pSrc, u8 *pDest, u32 nbBlocks); +bool tf_aes_update(struct tf_crypto_aes_operation_state *aes_state, + u8 *src, u8 *dest, u32 nb_blocks); /*---------------------------------------------------------------------------*/ /* DES/DES3 operations */ /*---------------------------------------------------------------------------*/ -void PDrvCryptoDESInit(void); -void PDrvCryptoDESExit(void); +void tf_des_init(void); +void tf_des_exit(void); /** *This function performs a DES update operation. @@ -305,21 +306,21 @@ void PDrvCryptoDESExit(void); *The DES accelerator is assumed loaded with the correct key * *DES_CTRL: defines the mode and direction - *pDESState: defines the operation IV - *pSrc: Input buffer to process. - *pDest: Output buffer containing the processed data. - *nbBlocks: Number of block(s)to process. + *des_state: defines the operation IV + *src: Input buffer to process. + *dest: Output buffer containing the processed data. + *nb_blocks: Number of block(s)to process. */ -bool PDrvCryptoUpdateDES(u32 DES_CTRL, - struct PUBLIC_CRYPTO_DES_OPERATION_STATE *pDESState, - u8 *pSrc, u8 *pDest, u32 nbBlocks); +bool tf_des_update(u32 DES_CTRL, + struct tf_crypto_des_operation_state *des_state, + u8 *src, u8 *dest, u32 nb_blocks); /*---------------------------------------------------------------------------*/ /* Digest operations */ /*---------------------------------------------------------------------------*/ -void PDrvCryptoDigestInit(void); -void PDrvCryptoDigestExit(void); +void tf_digest_init(void); +void tf_digest_exit(void); #ifdef CONFIG_SMC_KERNEL_CRYPTO int register_smc_public_crypto_digest(void); @@ -337,12 +338,12 @@ static inline void unregister_smc_public_crypto_digest(void) {} *This function performs a HASH update Operation. 
* *SHA_CTRL: defines the algorithm - *pSHAState: State of the operation - *pData: Input buffer to process - *dataLength: Length in bytes of the input buffer. + *sha_state: State of the operation + *data: Input buffer to process + *data_length: Length in bytes of the input buffer. */ -void PDrvCryptoUpdateHash( - struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *pSHAState, - u8 *pData, u32 dataLength); +bool tf_digest_update( + struct tf_crypto_sha_operation_state *sha_state, + u8 *data, u32 data_length); -#endif /*__SCX_PUBLIC_CRYPTO_H */ +#endif /*__TF_PUBLIC_CRYPTO_H */ diff --git a/security/smc/omap4/scx_public_crypto_AES.c b/security/smc/tf_crypto_aes.c index 96b065f..36dc522 100644 --- a/security/smc/omap4/scx_public_crypto_AES.c +++ b/security/smc/tf_crypto_aes.c @@ -1,26 +1,27 @@ -/* - * Copyright (c) 2006-2010 Trusted Logic S.A. +/** + * Copyright (c) 2011 Trusted Logic S.A. * All Rights Reserved. * - * This program is free software; you can redistribute it and/or modify it under - * the terms of the GNU General Public License version 2 as published by the - * Free Software Foundation. + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more - * details. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
* - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place, Suite 330, Boston, MA 02111-1307 USA + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA */ -#include "scxlnx_defs.h" -#include "scxlnx_util.h" -#include "scx_public_crypto.h" -#include "scx_public_dma.h" -#include "scxlnx_mshield.h" +#include "tf_defs.h" +#include "tf_util.h" +#include "tf_crypto.h" +#include "tf_dma.h" +#include "tf_zebra.h" #include <linux/io.h> #include <linux/interrupt.h> @@ -35,7 +36,6 @@ *AES Hardware Accelerator: Base address */ #define AES1_REGS_HW_ADDR 0x4B501000 -#define AES2_REGS_HW_ADDR 0x4B701000 /* *CTRL register Masks @@ -80,7 +80,7 @@ /** *This structure contains the registers of the AES HW accelerator. */ -struct AESReg_t { +struct aes_reg { u32 AES_KEY2_6; /* 0x00 */ u32 AES_KEY2_7; /* 0xO4 */ u32 AES_KEY2_4; /* 0x08 */ @@ -126,7 +126,7 @@ struct AESReg_t { u32 AES_SYSSTATUS; /* 0x88 */ }; -static struct AESReg_t *pAESReg_t; +static struct aes_reg *paes_reg; #ifdef CONFIG_SMC_KERNEL_CRYPTO #define FLAGS_FAST BIT(7) @@ -158,7 +158,7 @@ struct aes_hwa_ctx { int dma_lch_out; dma_addr_t dma_addr_out; - struct PUBLIC_CRYPTO_AES_OPERATION_STATE *ctx; + struct tf_crypto_aes_operation_state *ctx; }; static struct aes_hwa_ctx *aes_ctx; #endif @@ -167,69 +167,68 @@ static struct aes_hwa_ctx *aes_ctx; *Forward declarations *------------------------------------------------------------------------- */ -static void PDrvCryptoUpdateAESWithDMA(u8 *pSrc, u8 *pDest, - u32 nbBlocks); +static bool tf_aes_update_dma(u8 *src, u8 *dest, u32 nb_blocks, bool is_kernel); /*---------------------------------------------------------------------------- *Save HWA registers into the specified operation state structure 
*--------------------------------------------------------------------------*/ -static void PDrvCryptoSaveAESRegisters( - struct PUBLIC_CRYPTO_AES_OPERATION_STATE *pAESState) +static void tf_aes_save_registers( + struct tf_crypto_aes_operation_state *aes_state) { - dprintk(KERN_INFO "PDrvCryptoSaveAESRegisters: \ - pAESState(%p) <- pAESReg_t(%p): CTRL=0x%08x\n", - pAESState, pAESReg_t, pAESState->CTRL); + dprintk(KERN_INFO "tf_aes_save_registers: " + "aes_state(%p) <- paes_reg(%p): CTRL=0x%08x\n", + aes_state, paes_reg, aes_state->CTRL); /*Save the IV if we are in CBC or CTR mode (not required for ECB) */ - if (!AES_CTRL_IS_MODE_ECB(pAESState->CTRL)) { - pAESState->AES_IV_0 = INREG32(&pAESReg_t->AES_IV_IN_0); - pAESState->AES_IV_1 = INREG32(&pAESReg_t->AES_IV_IN_1); - pAESState->AES_IV_2 = INREG32(&pAESReg_t->AES_IV_IN_2); - pAESState->AES_IV_3 = INREG32(&pAESReg_t->AES_IV_IN_3); + if (!AES_CTRL_IS_MODE_ECB(aes_state->CTRL)) { + aes_state->AES_IV_0 = INREG32(&paes_reg->AES_IV_IN_0); + aes_state->AES_IV_1 = INREG32(&paes_reg->AES_IV_IN_1); + aes_state->AES_IV_2 = INREG32(&paes_reg->AES_IV_IN_2); + aes_state->AES_IV_3 = INREG32(&paes_reg->AES_IV_IN_3); } } /*---------------------------------------------------------------------------- *Restore the HWA registers from the operation state structure *---------------------------------------------------------------------------*/ -static void PDrvCryptoRestoreAESRegisters( - struct PUBLIC_CRYPTO_AES_OPERATION_STATE *pAESState) +static void tf_aes_restore_registers( + struct tf_crypto_aes_operation_state *aes_state) { - struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice(); - - dprintk(KERN_INFO "PDrvCryptoRestoreAESRegisters: \ - pAESReg_t(%p) <- pAESState(%p): CTRL=0x%08x\n", - pAESReg_t, pAESState, pAESState->CTRL); - - if (pAESState->key_is_public) { - OUTREG32(&pAESReg_t->AES_KEY1_0, pAESState->KEY1_0); - OUTREG32(&pAESReg_t->AES_KEY1_1, pAESState->KEY1_1); - OUTREG32(&pAESReg_t->AES_KEY1_2, pAESState->KEY1_2); - 
OUTREG32(&pAESReg_t->AES_KEY1_3, pAESState->KEY1_3); - OUTREG32(&pAESReg_t->AES_KEY1_4, pAESState->KEY1_4); - OUTREG32(&pAESReg_t->AES_KEY1_5, pAESState->KEY1_5); - OUTREG32(&pAESReg_t->AES_KEY1_6, pAESState->KEY1_6); - OUTREG32(&pAESReg_t->AES_KEY1_7, pAESState->KEY1_7); + struct tf_device *dev = tf_get_device(); + + dprintk(KERN_INFO "tf_aes_restore_registers: " + "paes_reg(%p) <- aes_state(%p): CTRL=0x%08x\n", + paes_reg, aes_state, aes_state->CTRL); + + if (aes_state->key_is_public) { + OUTREG32(&paes_reg->AES_KEY1_0, aes_state->KEY1_0); + OUTREG32(&paes_reg->AES_KEY1_1, aes_state->KEY1_1); + OUTREG32(&paes_reg->AES_KEY1_2, aes_state->KEY1_2); + OUTREG32(&paes_reg->AES_KEY1_3, aes_state->KEY1_3); + OUTREG32(&paes_reg->AES_KEY1_4, aes_state->KEY1_4); + OUTREG32(&paes_reg->AES_KEY1_5, aes_state->KEY1_5); + OUTREG32(&paes_reg->AES_KEY1_6, aes_state->KEY1_6); + OUTREG32(&paes_reg->AES_KEY1_7, aes_state->KEY1_7); /* * Make sure a potential secure key that has been overwritten by * the previous code is reinstalled before performing other * public crypto operations. 
*/ - pDevice->hAES1SecureKeyContext = 0; + dev->aes1_key_context = 0; } else { - pAESState->CTRL |= INREG32(&pAESReg_t->AES_CTRL); + aes_state->CTRL |= INREG32(&paes_reg->AES_CTRL); } /* * Restore the IV first if we are in CBC or CTR mode * (not required for ECB) */ - if (!AES_CTRL_IS_MODE_ECB(pAESState->CTRL)) { - OUTREG32(&pAESReg_t->AES_IV_IN_0, pAESState->AES_IV_0); - OUTREG32(&pAESReg_t->AES_IV_IN_1, pAESState->AES_IV_1); - OUTREG32(&pAESReg_t->AES_IV_IN_2, pAESState->AES_IV_2); - OUTREG32(&pAESReg_t->AES_IV_IN_3, pAESState->AES_IV_3); + if (!AES_CTRL_IS_MODE_ECB(aes_state->CTRL)) { + OUTREG32(&paes_reg->AES_IV_IN_0, aes_state->AES_IV_0); + OUTREG32(&paes_reg->AES_IV_IN_1, aes_state->AES_IV_1); + OUTREG32(&paes_reg->AES_IV_IN_2, aes_state->AES_IV_2); + OUTREG32(&paes_reg->AES_IV_IN_3, aes_state->AES_IV_3); } /* Then set the CTRL register: @@ -237,126 +236,153 @@ static void PDrvCryptoRestoreAESRegisters( * it leads to break the HWA process (observed by experimentation) */ - pAESState->CTRL = (pAESState->CTRL & (3 << 3)) /* key size */ - | (pAESState->CTRL & ((1 << 2) | (1 << 5) | (1 << 6))) + aes_state->CTRL = (aes_state->CTRL & (3 << 3)) /* key size */ + | (aes_state->CTRL & ((1 << 2) | (1 << 5) | (1 << 6))) | (0x3 << 7) /* Always set CTR_WIDTH to 128-bit */; - if ((pAESState->CTRL & 0x1FC) != - (INREG32(&pAESReg_t->AES_CTRL) & 0x1FC)) - OUTREG32(&pAESReg_t->AES_CTRL, pAESState->CTRL & 0x1FC); + if ((aes_state->CTRL & 0x1FC) != + (INREG32(&paes_reg->AES_CTRL) & 0x1FC)) + OUTREG32(&paes_reg->AES_CTRL, aes_state->CTRL & 0x1FC); /* Set the SYSCONFIG register to 0 */ - OUTREG32(&pAESReg_t->AES_SYSCONFIG, 0); + OUTREG32(&paes_reg->AES_SYSCONFIG, 0); } /*-------------------------------------------------------------------------- */ -void PDrvCryptoAESInit(void) +void tf_aes_init(void) { - pAESReg_t = omap_ioremap(AES1_REGS_HW_ADDR, SZ_1M, MT_DEVICE); - if (pAESReg_t == NULL) + paes_reg = omap_ioremap(AES1_REGS_HW_ADDR, SZ_1M, MT_DEVICE); + if (paes_reg == NULL) 
panic("Unable to remap AES1 module"); } -void PDrvCryptoAESExit(void) +void tf_aes_exit(void) { - omap_iounmap(pAESReg_t); + omap_iounmap(paes_reg); } -bool PDrvCryptoUpdateAES(struct PUBLIC_CRYPTO_AES_OPERATION_STATE *pAESState, - u8 *pSrc, u8 *pDest, u32 nbBlocks) +bool tf_aes_update(struct tf_crypto_aes_operation_state *aes_state, + u8 *src, u8 *dest, u32 nb_blocks) { u32 nbr_of_blocks; - u32 vTemp; - u8 *pProcessSrc = pSrc; - u8 *pProcessDest = pDest; - u32 dmaUse = PUBLIC_CRYPTO_DMA_USE_NONE; + u32 temp; + u8 *process_src; + u8 *process_dest; + u32 dma_use = PUBLIC_CRYPTO_DMA_USE_NONE; + bool is_kernel = false; /* *Choice of the processing type */ - if (nbBlocks * AES_BLOCK_SIZE >= DMA_TRIGGER_IRQ_AES) - dmaUse = PUBLIC_CRYPTO_DMA_USE_IRQ; - - dprintk(KERN_INFO "PDrvCryptoUpdateAES: \ - pSrc=0x%08x, pDest=0x%08x, nbBlocks=0x%08x, dmaUse=0x%08x\n", - (unsigned int)pSrc, - (unsigned int)pDest, - (unsigned int)nbBlocks, - (unsigned int)dmaUse); - - if (nbBlocks == 0) { - dprintk(KERN_INFO "PDrvCryptoUpdateAES: Nothing to process\n"); + if (nb_blocks * AES_BLOCK_SIZE >= DMA_TRIGGER_IRQ_AES) + dma_use = PUBLIC_CRYPTO_DMA_USE_IRQ; + + dprintk(KERN_INFO "tf_aes_update: " + "src=0x%08x, dest=0x%08x, nb_blocks=0x%08x, dma_use=0x%08x\n", + (unsigned int)src, + (unsigned int)dest, + (unsigned int)nb_blocks, + (unsigned int)dma_use); + + if (aes_state->key_is_public) + is_kernel = true; + + if (nb_blocks == 0) { + dprintk(KERN_INFO "tf_aes_update: Nothing to process\n"); return true; } - if ((AES_CTRL_GET_DIRECTION(INREG32(&pAESReg_t->AES_CTRL)) != - AES_CTRL_GET_DIRECTION(pAESState->CTRL)) && - !pAESState->key_is_public) { + if ((AES_CTRL_GET_DIRECTION(INREG32(&paes_reg->AES_CTRL)) != + AES_CTRL_GET_DIRECTION(aes_state->CTRL)) && + !aes_state->key_is_public) { dprintk(KERN_WARNING "HWA configured for another direction\n"); return false; } /*Restore the registers of the accelerator from the operation state */ - PDrvCryptoRestoreAESRegisters(pAESState); + 
tf_aes_restore_registers(aes_state); - if (dmaUse == PUBLIC_CRYPTO_DMA_USE_IRQ) { + if (dma_use == PUBLIC_CRYPTO_DMA_USE_IRQ) { /* Perform the update with DMA */ - PDrvCryptoUpdateAESWithDMA(pProcessSrc, - pProcessDest, nbBlocks); + if (!tf_aes_update_dma(src, dest, nb_blocks, is_kernel)) + return false; } else { + u8 buf[DMA_TRIGGER_IRQ_AES]; + + /* + * Synchronous Linux crypto API buffers are mapped in kernel + * space + */ + + if (is_kernel) { + process_src = src; + process_dest = dest; + } else { + if (copy_from_user(buf, src, + nb_blocks * AES_BLOCK_SIZE)) + return false; + + process_src = process_dest = buf; + } + for (nbr_of_blocks = 0; - nbr_of_blocks < nbBlocks; nbr_of_blocks++) { + nbr_of_blocks < nb_blocks; nbr_of_blocks++) { /*We wait for the input ready */ /*Crash the system as this should never occur */ - if (SCXPublicCryptoWaitForReadyBit( - (u32 *)&pAESReg_t->AES_CTRL, + if (tf_crypto_wait_for_ready_bit( + (u32 *)&paes_reg->AES_CTRL, AES_CTRL_INPUT_READY_BIT) != PUBLIC_CRYPTO_OPERATION_SUCCESS) - panic("Wait too long for AES hardware \ - accelerator Input data to be ready\n"); + panic("Wait too long for AES hardware " + "accelerator Input data to be ready\n"); /* We copy the 16 bytes of data src->reg */ - vTemp = (u32) BYTES_TO_LONG(pProcessSrc); - OUTREG32(&pAESReg_t->AES_DATA_IN_0, vTemp); - pProcessSrc += 4; - vTemp = (u32) BYTES_TO_LONG(pProcessSrc); - OUTREG32(&pAESReg_t->AES_DATA_IN_1, vTemp); - pProcessSrc += 4; - vTemp = (u32) BYTES_TO_LONG(pProcessSrc); - OUTREG32(&pAESReg_t->AES_DATA_IN_2, vTemp); - pProcessSrc += 4; - vTemp = (u32) BYTES_TO_LONG(pProcessSrc); - OUTREG32(&pAESReg_t->AES_DATA_IN_3, vTemp); - pProcessSrc += 4; + temp = (u32) BYTES_TO_LONG(process_src); + OUTREG32(&paes_reg->AES_DATA_IN_0, temp); + process_src += 4; + temp = (u32) BYTES_TO_LONG(process_src); + OUTREG32(&paes_reg->AES_DATA_IN_1, temp); + process_src += 4; + temp = (u32) BYTES_TO_LONG(process_src); + OUTREG32(&paes_reg->AES_DATA_IN_2, temp); + process_src += 4; 
+ temp = (u32) BYTES_TO_LONG(process_src); + OUTREG32(&paes_reg->AES_DATA_IN_3, temp); + process_src += 4; /* We wait for the output ready */ - SCXPublicCryptoWaitForReadyBitInfinitely( - (u32 *)&pAESReg_t->AES_CTRL, + tf_crypto_wait_for_ready_bit_infinitely( + (u32 *)&paes_reg->AES_CTRL, AES_CTRL_OUTPUT_READY_BIT); /* We copy the 16 bytes of data reg->dest */ - vTemp = INREG32(&pAESReg_t->AES_DATA_IN_0); - LONG_TO_BYTE(pProcessDest, vTemp); - pProcessDest += 4; - vTemp = INREG32(&pAESReg_t->AES_DATA_IN_1); - LONG_TO_BYTE(pProcessDest, vTemp); - pProcessDest += 4; - vTemp = INREG32(&pAESReg_t->AES_DATA_IN_2); - LONG_TO_BYTE(pProcessDest, vTemp); - pProcessDest += 4; - vTemp = INREG32(&pAESReg_t->AES_DATA_IN_3); - LONG_TO_BYTE(pProcessDest, vTemp); - pProcessDest += 4; + temp = INREG32(&paes_reg->AES_DATA_IN_0); + LONG_TO_BYTE(process_dest, temp); + process_dest += 4; + temp = INREG32(&paes_reg->AES_DATA_IN_1); + LONG_TO_BYTE(process_dest, temp); + process_dest += 4; + temp = INREG32(&paes_reg->AES_DATA_IN_2); + LONG_TO_BYTE(process_dest, temp); + process_dest += 4; + temp = INREG32(&paes_reg->AES_DATA_IN_3); + LONG_TO_BYTE(process_dest, temp); + process_dest += 4; } + + if (!is_kernel) + if (copy_to_user(dest, buf, + nb_blocks * AES_BLOCK_SIZE)) + return false; } /* Save the accelerator registers into the operation state */ - PDrvCryptoSaveAESRegisters(pAESState); + tf_aes_save_registers(aes_state); - dprintk(KERN_INFO "PDrvCryptoUpdateAES: Done\n"); + dprintk(KERN_INFO "tf_aes_update: Done\n"); return true; } @@ -366,13 +392,13 @@ bool PDrvCryptoUpdateAES(struct PUBLIC_CRYPTO_AES_OPERATION_STATE *pAESState, *Static function, perform AES encryption/decryption using the DMA for data *transfer. 
* - *inputs: pSrc : pointer of the input data to process - * nbBlocks : number of block to process - * dmaUse : PUBLIC_CRYPTO_DMA_USE_IRQ (use irq to monitor end of DMA) + *inputs: src : pointer of the input data to process + * nb_blocks : number of block to process + * dma_use : PUBLIC_CRYPTO_DMA_USE_IRQ (use irq to monitor end of DMA) * | PUBLIC_CRYPTO_DMA_USE_POLLING (poll the end of DMA) - *output: pDest : pointer of the output data (can be eq to pSrc) + *output: dest : pointer of the output data (can be eq to src) */ -static void PDrvCryptoUpdateAESWithDMA(u8 *pSrc, u8 *pDest, u32 nbBlocks) +static bool tf_aes_update_dma(u8 *src, u8 *dest, u32 nb_blocks, bool is_kernel) { /* *Note: The DMA only sees physical addresses ! @@ -382,31 +408,33 @@ static void PDrvCryptoUpdateAESWithDMA(u8 *pSrc, u8 *pDest, u32 nbBlocks) int dma_ch1; struct omap_dma_channel_params ch0_parameters; struct omap_dma_channel_params ch1_parameters; - u32 nLength = nbBlocks * AES_BLOCK_SIZE; - u32 nLengthLoop = 0; - u32 nbBlocksLoop = 0; - struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice(); + u32 length = nb_blocks * AES_BLOCK_SIZE; + u32 length_loop = 0; + u32 nb_blocksLoop = 0; + struct tf_device *dev = tf_get_device(); dprintk(KERN_INFO - "PDrvCryptoUpdateAESWithDMA: In=0x%08x, Out=0x%08x, Len=%u\n", - (unsigned int)pSrc, - (unsigned int)pDest, - (unsigned int)nLength); + "%s: In=0x%08x, Out=0x%08x, Len=%u\n", + __func__, + (unsigned int)src, + (unsigned int)dest, + (unsigned int)length); /*lock the DMA */ - mutex_lock(&pDevice->sm.sDMALock); + while (!mutex_trylock(&dev->sm.dma_mutex)) + cpu_relax(); - if (scxPublicDMARequest(&dma_ch0) != PUBLIC_CRYPTO_OPERATION_SUCCESS) { - mutex_unlock(&pDevice->sm.sDMALock); - return; + if (tf_dma_request(&dma_ch0) != PUBLIC_CRYPTO_OPERATION_SUCCESS) { + mutex_unlock(&dev->sm.dma_mutex); + return false; } - if (scxPublicDMARequest(&dma_ch1) != PUBLIC_CRYPTO_OPERATION_SUCCESS) { - scxPublicDMARelease(dma_ch0); - mutex_unlock(&pDevice->sm.sDMALock); - 
return; + if (tf_dma_request(&dma_ch1) != PUBLIC_CRYPTO_OPERATION_SUCCESS) { + omap_free_dma(dma_ch0); + mutex_unlock(&dev->sm.dma_mutex); + return false; } - while (nLength > 0) { + while (length > 0) { /* * At this time, we are sure that the DMAchannels @@ -414,52 +442,61 @@ static void PDrvCryptoUpdateAESWithDMA(u8 *pSrc, u8 *pDest, u32 nbBlocks) */ /*DMA used for Input and Output */ - OUTREG32(&pAESReg_t->AES_SYSCONFIG, - INREG32(&pAESReg_t->AES_SYSCONFIG) + OUTREG32(&paes_reg->AES_SYSCONFIG, + INREG32(&paes_reg->AES_SYSCONFIG) | AES_SYSCONFIG_DMA_REQ_OUT_EN_BIT | AES_SYSCONFIG_DMA_REQ_IN_EN_BIT); /*check length */ - if (nLength <= pDevice->nDMABufferLength) - nLengthLoop = nLength; + if (length <= dev->dma_buffer_length) + length_loop = length; else - nLengthLoop = pDevice->nDMABufferLength; + length_loop = dev->dma_buffer_length; /*The length is always a multiple of the block size */ - nbBlocksLoop = nLengthLoop / AES_BLOCK_SIZE; + nb_blocksLoop = length_loop / AES_BLOCK_SIZE; /* - *Copy the data from the input buffer into a preallocated - *buffer which is aligned on the beginning of a page. - *This may prevent potential issues when flushing/invalidating - *the buffer as the cache lines are 64 bytes long. + * Copy the data from the user input buffer into a preallocated + * buffer which has correct properties from efficient DMA + * transfers. 
*/ - memcpy(pDevice->pDMABuffer, pSrc, nLengthLoop); + if (!is_kernel) { + if (copy_from_user( + dev->dma_buffer, src, length_loop)) { + omap_free_dma(dma_ch0); + omap_free_dma(dma_ch1); + mutex_unlock(&dev->sm.dma_mutex); + return false; + } + } else { + memcpy(dev->dma_buffer, src, length_loop); + } /*DMA1: Mem -> AES */ - scxPublicSetDMAChannelCommonParams(&ch0_parameters, - nbBlocksLoop, + tf_dma_set_channel_common_params(&ch0_parameters, + nb_blocksLoop, DMA_CEN_Elts_per_Frame_AES, AES1_REGS_HW_ADDR + 0x60, - (u32)pDevice->pDMABufferPhys, + (u32)dev->dma_buffer_phys, OMAP44XX_DMA_AES1_P_DATA_IN_REQ); ch0_parameters.src_amode = OMAP_DMA_AMODE_POST_INC; ch0_parameters.dst_amode = OMAP_DMA_AMODE_CONSTANT; ch0_parameters.src_or_dst_synch = OMAP_DMA_DST_SYNC; - dprintk(KERN_INFO "PDrvCryptoUpdateAESWithDMA: \ - scxPublicDMASetParams(ch0)\n"); - scxPublicDMASetParams(dma_ch0, &ch0_parameters); + dprintk(KERN_INFO "%s: omap_set_dma_params(ch0)\n", __func__); + omap_set_dma_params(dma_ch0, &ch0_parameters); - omap_set_dma_src_burst_mode(dma_ch0, OMAP_DMA_DATA_BURST_16); - omap_set_dma_dest_burst_mode(dma_ch0, OMAP_DMA_DATA_BURST_16); + omap_set_dma_src_burst_mode(dma_ch0, OMAP_DMA_DATA_BURST_8); + omap_set_dma_dest_burst_mode(dma_ch0, OMAP_DMA_DATA_BURST_8); + omap_set_dma_src_data_pack(dma_ch0, 1); /*DMA2: AES -> Mem */ - scxPublicSetDMAChannelCommonParams(&ch1_parameters, - nbBlocksLoop, + tf_dma_set_channel_common_params(&ch1_parameters, + nb_blocksLoop, DMA_CEN_Elts_per_Frame_AES, - (u32)pDevice->pDMABufferPhys, + (u32)dev->dma_buffer_phys, AES1_REGS_HW_ADDR + 0x60, OMAP44XX_DMA_AES1_P_DATA_OUT_REQ); @@ -467,36 +504,36 @@ static void PDrvCryptoUpdateAESWithDMA(u8 *pSrc, u8 *pDest, u32 nbBlocks) ch1_parameters.dst_amode = OMAP_DMA_AMODE_POST_INC; ch1_parameters.src_or_dst_synch = OMAP_DMA_SRC_SYNC; - dprintk(KERN_INFO "PDrvCryptoUpdateAESWithDMA: \ - scxPublicDMASetParams(ch1)\n"); - scxPublicDMASetParams(dma_ch1, &ch1_parameters); + dprintk(KERN_INFO "%s: 
omap_set_dma_params(ch1)\n", __func__); + omap_set_dma_params(dma_ch1, &ch1_parameters); - omap_set_dma_src_burst_mode(dma_ch1, OMAP_DMA_DATA_BURST_16); - omap_set_dma_dest_burst_mode(dma_ch1, OMAP_DMA_DATA_BURST_16); + omap_set_dma_src_burst_mode(dma_ch1, OMAP_DMA_DATA_BURST_8); + omap_set_dma_dest_burst_mode(dma_ch1, OMAP_DMA_DATA_BURST_8); + omap_set_dma_dest_data_pack(dma_ch1, 1); wmb(); dprintk(KERN_INFO - "PDrvCryptoUpdateAESWithDMA: Start DMA channel %d\n", - (unsigned int)dma_ch1); - scxPublicDMAStart(dma_ch1, OMAP_DMA_BLOCK_IRQ); + "%s: Start DMA channel %d\n", + __func__, (unsigned int)dma_ch1); + tf_dma_start(dma_ch1, OMAP_DMA_BLOCK_IRQ); dprintk(KERN_INFO - "PDrvCryptoUpdateAESWithDMA: Start DMA channel %d\n", - (unsigned int)dma_ch0); - scxPublicDMAStart(dma_ch0, OMAP_DMA_BLOCK_IRQ); + "%s: Start DMA channel %d\n", + __func__, (unsigned int)dma_ch0); + tf_dma_start(dma_ch0, OMAP_DMA_BLOCK_IRQ); dprintk(KERN_INFO - "PDrvCryptoUpdateAESWithDMA: Waiting for IRQ\n"); - scxPublicDMAWait(2); + "%s: Waiting for IRQ\n", __func__); + tf_dma_wait(2); /*Unset DMA synchronisation requests */ - OUTREG32(&pAESReg_t->AES_SYSCONFIG, - INREG32(&pAESReg_t->AES_SYSCONFIG) + OUTREG32(&paes_reg->AES_SYSCONFIG, + INREG32(&paes_reg->AES_SYSCONFIG) & (~AES_SYSCONFIG_DMA_REQ_OUT_EN_BIT) & (~AES_SYSCONFIG_DMA_REQ_IN_EN_BIT)); - scxPublicDMAClearChannel(dma_ch0); - scxPublicDMAClearChannel(dma_ch1); + omap_clear_dma(dma_ch0); + omap_clear_dma(dma_ch1); /* *The dma transfer is complete @@ -504,23 +541,35 @@ static void PDrvCryptoUpdateAESWithDMA(u8 *pSrc, u8 *pDest, u32 nbBlocks) /*The DMA output is in the preallocated aligned buffer *and needs to be copied to the output buffer.*/ - memcpy(pDest, pDevice->pDMABuffer, nLengthLoop); + if (!is_kernel) { + if (copy_to_user( + dest, dev->dma_buffer, length_loop)) { + omap_free_dma(dma_ch0); + omap_free_dma(dma_ch1); + mutex_unlock(&dev->sm.dma_mutex); + return false; + } + } else { + memcpy(dest, dev->dma_buffer, length_loop); + } - 
pSrc += nLengthLoop; - pDest += nLengthLoop; - nLength -= nLengthLoop; + src += length_loop; + dest += length_loop; + length -= length_loop; } /*For safety reasons, let's clean the working buffer */ - memset(pDevice->pDMABuffer, 0, nLengthLoop); + memset(dev->dma_buffer, 0, length_loop); /*release the DMA */ - scxPublicDMARelease(dma_ch0); - scxPublicDMARelease(dma_ch1); + omap_free_dma(dma_ch0); + omap_free_dma(dma_ch1); + + mutex_unlock(&dev->sm.dma_mutex); - mutex_unlock(&pDevice->sm.sDMALock); + dprintk(KERN_INFO "%s: Success\n", __func__); - dprintk(KERN_INFO "PDrvCryptoUpdateAESWithDMA: Success\n"); + return true; } #ifdef CONFIG_SMC_KERNEL_CRYPTO @@ -579,7 +628,7 @@ static int aes_dma_start(struct aes_hwa_ctx *ctx) size_t count; dma_addr_t addr_in, addr_out; struct omap_dma_channel_params dma_params; - struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state = + struct tf_crypto_aes_operation_state *state = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(ctx->req)); if (sg_is_last(ctx->in_sg) && sg_is_last(ctx->out_sg)) { @@ -622,14 +671,14 @@ static int aes_dma_start(struct aes_hwa_ctx *ctx) ctx->total -= count; - PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_AES1, LOCK_HWA); + tf_crypto_lock_hwa(PUBLIC_CRYPTO_HWA_AES1, LOCK_HWA); /* Configure HWA */ - SCXPublicCryptoEnableClock(PUBLIC_CRYPTO_AES1_CLOCK_REG); + tf_crypto_enable_clock(PUBLIC_CRYPTO_AES1_CLOCK_REG); - PDrvCryptoRestoreAESRegisters(state); + tf_aes_restore_registers(state); - OUTREG32(&pAESReg_t->AES_SYSCONFIG, INREG32(&pAESReg_t->AES_SYSCONFIG) + OUTREG32(&paes_reg->AES_SYSCONFIG, INREG32(&paes_reg->AES_SYSCONFIG) | AES_SYSCONFIG_DMA_REQ_OUT_EN_BIT | AES_SYSCONFIG_DMA_REQ_IN_EN_BIT); @@ -657,8 +706,9 @@ static int aes_dma_start(struct aes_hwa_ctx *ctx) omap_set_dma_params(ctx->dma_lch_in, &dma_params); - omap_set_dma_dest_burst_mode(ctx->dma_lch_in, OMAP_DMA_DATA_BURST_16); - omap_set_dma_src_burst_mode(ctx->dma_lch_in, OMAP_DMA_DATA_BURST_16); + omap_set_dma_dest_burst_mode(ctx->dma_lch_in, 
OMAP_DMA_DATA_BURST_8); + omap_set_dma_src_burst_mode(ctx->dma_lch_in, OMAP_DMA_DATA_BURST_8); + omap_set_dma_src_data_pack(ctx->dma_lch_in, 1); /* OUT */ dma_params.trigger = ctx->dma_out; @@ -670,8 +720,9 @@ static int aes_dma_start(struct aes_hwa_ctx *ctx) omap_set_dma_params(ctx->dma_lch_out, &dma_params); - omap_set_dma_dest_burst_mode(ctx->dma_lch_out, OMAP_DMA_DATA_BURST_16); - omap_set_dma_src_burst_mode(ctx->dma_lch_out, OMAP_DMA_DATA_BURST_16); + omap_set_dma_dest_burst_mode(ctx->dma_lch_out, OMAP_DMA_DATA_BURST_8); + omap_set_dma_src_burst_mode(ctx->dma_lch_out, OMAP_DMA_DATA_BURST_8); + omap_set_dma_dest_data_pack(ctx->dma_lch_out, 1); /* Is this really needed? */ omap_disable_dma_irq(ctx->dma_lch_in, OMAP_DMA_DROP_IRQ); @@ -689,14 +740,14 @@ static int aes_dma_start(struct aes_hwa_ctx *ctx) static int aes_dma_stop(struct aes_hwa_ctx *ctx) { - struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state = + struct tf_crypto_aes_operation_state *state = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(ctx->req)); int err = 0; size_t count; dprintk(KERN_INFO "aes_dma_stop(%p)\n", ctx); - PDrvCryptoSaveAESRegisters(state); + tf_aes_save_registers(state); if (!AES_CTRL_IS_MODE_ECB(state->CTRL)) { u32 *ptr = (u32 *) ctx->req->info; @@ -707,11 +758,11 @@ static int aes_dma_stop(struct aes_hwa_ctx *ctx) ptr[3] = state->AES_IV_3; } - OUTREG32(&pAESReg_t->AES_SYSCONFIG, 0); + OUTREG32(&paes_reg->AES_SYSCONFIG, 0); - SCXPublicCryptoDisableClock(PUBLIC_CRYPTO_AES1_CLOCK_REG); + tf_crypto_disable_clock(PUBLIC_CRYPTO_AES1_CLOCK_REG); - PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_AES1, UNLOCK_HWA); + tf_crypto_lock_hwa(PUBLIC_CRYPTO_HWA_AES1, UNLOCK_HWA); omap_stop_dma(ctx->dma_lch_in); omap_stop_dma(ctx->dma_lch_out); @@ -812,7 +863,7 @@ static void aes_dma_cleanup(struct aes_hwa_ctx *ctx) static int aes_handle_req(struct aes_hwa_ctx *ctx) { - struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state; + struct tf_crypto_aes_operation_state *state; struct crypto_async_request *async_req, 
*backlog; struct ablkcipher_request *req; unsigned long flags; @@ -866,7 +917,7 @@ static void aes_tasklet(unsigned long data) } /* Generic */ -static int aes_setkey(struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state, +static int aes_setkey(struct tf_crypto_aes_operation_state *state, const u8 *key, unsigned int keylen) { u32 *ptr = (u32 *)key; @@ -908,6 +959,9 @@ static int aes_operate(struct ablkcipher_request *req) unsigned long flags; int err; + /* Make sure AES HWA is accessible */ + tf_delayed_secure_resume(); + spin_lock_irqsave(&aes_ctx->lock, flags); err = ablkcipher_enqueue_request(&aes_ctx->queue, req); spin_unlock_irqrestore(&aes_ctx->lock, flags); @@ -920,7 +974,7 @@ static int aes_operate(struct ablkcipher_request *req) static int aes_encrypt(struct ablkcipher_request *req) { - struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state = + struct tf_crypto_aes_operation_state *state = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); state->CTRL |= AES_CTRL_DIRECTION_ENCRYPT; @@ -930,7 +984,7 @@ static int aes_encrypt(struct ablkcipher_request *req) static int aes_decrypt(struct ablkcipher_request *req) { - struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state = + struct tf_crypto_aes_operation_state *state = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); state->CTRL &= ~(AES_CTRL_DIRECTION_ENCRYPT); @@ -939,53 +993,110 @@ static int aes_decrypt(struct ablkcipher_request *req) return aes_operate(req); } -static int aes_single_setkey(struct crypto_tfm *tfm, const u8 *key, - unsigned int keylen) +static int aes_sync_operate(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) { - struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state = crypto_tfm_ctx(tfm); + struct crypto_tfm *tfm = crypto_blkcipher_tfm(desc->tfm); + struct tf_crypto_aes_operation_state *state = crypto_tfm_ctx(tfm); + struct blkcipher_walk walk; + int err; - state->CTRL = AES_CTRL_MODE_ECB_BIT; + if (nbytes % AES_BLOCK_SIZE) + return -EINVAL; - return 
aes_setkey(state, key, keylen); + /* Make sure AES HWA is accessible */ + tf_delayed_secure_resume(); + + tf_crypto_lock_hwa(PUBLIC_CRYPTO_HWA_AES1, LOCK_HWA); + tf_crypto_enable_clock(PUBLIC_CRYPTO_AES1_CLOCK_REG); + + blkcipher_walk_init(&walk, dst, src, nbytes); + err = blkcipher_walk_virt(desc, &walk); + + if (!AES_CTRL_IS_MODE_ECB(state->CTRL)) { + u32 *ptr = (u32 *) walk.iv; + + state->AES_IV_0 = ptr[0]; + state->AES_IV_1 = ptr[1]; + state->AES_IV_2 = ptr[2]; + state->AES_IV_3 = ptr[3]; + } + + while ((nbytes = walk.nbytes)) { + if (!tf_aes_update(state, walk.src.virt.addr, + walk.dst.virt.addr, nbytes / AES_BLOCK_SIZE)) { + err = -EINVAL; + break; + } + + /* tf_aes_update processes all the data */ + nbytes = 0; + + err = blkcipher_walk_done(desc, &walk, nbytes); + } + + if (!AES_CTRL_IS_MODE_ECB(state->CTRL)) { + u32 *ptr = (u32 *) walk.iv; + + ptr[0] = state->AES_IV_0; + ptr[1] = state->AES_IV_1; + ptr[2] = state->AES_IV_2; + ptr[3] = state->AES_IV_3; + } + + tf_crypto_disable_clock(PUBLIC_CRYPTO_AES1_CLOCK_REG); + tf_crypto_lock_hwa(PUBLIC_CRYPTO_HWA_AES1, UNLOCK_HWA); + + return err; } -static void aes_single_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +static int aes_sync_encrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) { - struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state = crypto_tfm_ctx(tfm); + struct crypto_tfm *tfm = crypto_blkcipher_tfm(desc->tfm); + struct tf_crypto_aes_operation_state *state = crypto_tfm_ctx(tfm); state->CTRL |= AES_CTRL_DIRECTION_ENCRYPT; - PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_AES1, LOCK_HWA); + dprintk(KERN_INFO "aes_sync_encrypt nbytes=0x%x\n", nbytes); - SCXPublicCryptoEnableClock(PUBLIC_CRYPTO_AES1_CLOCK_REG); - PDrvCryptoUpdateAES(state, (u8 *) in, out, 1); - SCXPublicCryptoDisableClock(PUBLIC_CRYPTO_AES1_CLOCK_REG); - - PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_AES1, UNLOCK_HWA); + return aes_sync_operate(desc, dst, src, nbytes); } -static void 
aes_single_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +static int aes_sync_decrypt(struct blkcipher_desc *desc, + struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) { - struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state = - crypto_tfm_ctx(tfm); + struct crypto_tfm *tfm = crypto_blkcipher_tfm(desc->tfm); + struct tf_crypto_aes_operation_state *state = crypto_tfm_ctx(tfm); state->CTRL &= ~(AES_CTRL_DIRECTION_ENCRYPT); state->CTRL |= AES_CTRL_DIRECTION_DECRYPT; - PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_AES1, LOCK_HWA); - - SCXPublicCryptoEnableClock(PUBLIC_CRYPTO_AES1_CLOCK_REG); - PDrvCryptoUpdateAES(state, (u8 *) in, out, 1); - SCXPublicCryptoDisableClock(PUBLIC_CRYPTO_AES1_CLOCK_REG); + dprintk(KERN_INFO "aes_sync_decrypt\n"); - PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_AES1, UNLOCK_HWA); + return aes_sync_operate(desc, dst, src, nbytes); } /* AES ECB */ +static int aes_ecb_sync_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen) +{ + struct tf_crypto_aes_operation_state *state = crypto_tfm_ctx(tfm); + + state->CTRL = AES_CTRL_MODE_ECB_BIT; + + dprintk(KERN_INFO "aes_ecb_sync_setkey\n"); + + return aes_setkey(state, key, keylen); +} + static int aes_ecb_setkey(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int keylen) { - struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state = + struct tf_crypto_aes_operation_state *state = crypto_ablkcipher_ctx(tfm); state->CTRL = AES_CTRL_MODE_ECB_BIT; @@ -994,10 +1105,22 @@ static int aes_ecb_setkey(struct crypto_ablkcipher *tfm, const u8 *key, } /* AES CBC */ +static int aes_cbc_sync_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen) +{ + struct tf_crypto_aes_operation_state *state = crypto_tfm_ctx(tfm); + + state->CTRL = AES_CTRL_MODE_CBC_BIT; + + dprintk(KERN_INFO "aes_cbc_sync_setkey\n"); + + return aes_setkey(state, key, keylen); +} + static int aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int keylen) { - struct 
PUBLIC_CRYPTO_AES_OPERATION_STATE *state = + struct tf_crypto_aes_operation_state *state = crypto_ablkcipher_ctx(tfm); state->CTRL = AES_CTRL_MODE_CBC_BIT; @@ -1006,10 +1129,22 @@ static int aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key, } /* AES CTR */ +static int aes_ctr_sync_setkey(struct crypto_tfm *tfm, const u8 *key, + unsigned int keylen) +{ + struct tf_crypto_aes_operation_state *state = crypto_tfm_ctx(tfm); + + state->CTRL = AES_CTRL_MODE_CTR_BIT; + + dprintk(KERN_INFO "aes_cbc_sync_setkey\n"); + + return aes_setkey(state, key, keylen); +} + static int aes_ctr_setkey(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int keylen) { - struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state = + struct tf_crypto_aes_operation_state *state = crypto_ablkcipher_ctx(tfm); /* Always defaults to 128-bit counter */ @@ -1018,24 +1153,73 @@ static int aes_ctr_setkey(struct crypto_ablkcipher *tfm, const u8 *key, return aes_setkey(state, key, keylen); } -static struct crypto_alg smc_aes_alg = { - .cra_flags = CRYPTO_ALG_TYPE_CIPHER, +static struct crypto_alg smc_aes_ecb_sync_alg = { + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_priority = 999, - .cra_name = "aes", - .cra_driver_name = "aes-smc", + .cra_name = "ecb(aes)", + .cra_driver_name = "aes-ecb-smc", + .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = - sizeof(struct PUBLIC_CRYPTO_AES_OPERATION_STATE), + sizeof(struct tf_crypto_aes_operation_state), .cra_alignmask = 3, - .cra_list = LIST_HEAD_INIT(smc_aes_alg.cra_list), + .cra_list = LIST_HEAD_INIT(smc_aes_ecb_sync_alg.cra_list), .cra_u = { - .cipher = { - .cia_min_keysize = AES_MIN_KEY_SIZE, - .cia_max_keysize = AES_MAX_KEY_SIZE, - .cia_setkey = aes_single_setkey, - .cia_encrypt = aes_single_encrypt, - .cia_decrypt = aes_single_decrypt, + .blkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = aes_ecb_sync_setkey, + .encrypt = aes_sync_encrypt, + 
.decrypt = aes_sync_decrypt, + } + }, +}; + +static struct crypto_alg smc_aes_cbc_sync_alg = { + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_priority = 999, + .cra_name = "cbc(aes)", + .cra_driver_name = "aes-cbc-smc", + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = + sizeof(struct tf_crypto_aes_operation_state), + .cra_alignmask = 3, + .cra_list = LIST_HEAD_INIT(smc_aes_cbc_sync_alg.cra_list), + .cra_u = { + .blkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = PUBLIC_CRYPTO_IV_MAX_SIZE, + .setkey = aes_cbc_sync_setkey, + .encrypt = aes_sync_encrypt, + .decrypt = aes_sync_decrypt, + } + }, +}; + +static struct crypto_alg smc_aes_ctr_sync_alg = { + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, + .cra_priority = 999, + .cra_name = "ctr(aes)", + .cra_driver_name = "aes-ctr-smc", + .cra_type = &crypto_blkcipher_type, + .cra_module = THIS_MODULE, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = + sizeof(struct tf_crypto_aes_operation_state), + .cra_alignmask = 3, + .cra_list = LIST_HEAD_INIT(smc_aes_ctr_sync_alg.cra_list), + .cra_u = { + .blkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = PUBLIC_CRYPTO_IV_MAX_SIZE, + .setkey = aes_ctr_sync_setkey, + .encrypt = aes_sync_encrypt, + .decrypt = aes_sync_decrypt, } }, }; @@ -1049,7 +1233,7 @@ static struct crypto_alg smc_aes_ecb_alg = { .cra_module = THIS_MODULE, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = - sizeof(struct PUBLIC_CRYPTO_AES_OPERATION_STATE), + sizeof(struct tf_crypto_aes_operation_state), .cra_alignmask = 3, .cra_list = LIST_HEAD_INIT(smc_aes_ecb_alg.cra_list), .cra_u = { @@ -1072,7 +1256,7 @@ static struct crypto_alg smc_aes_cbc_alg = { .cra_type = &crypto_ablkcipher_type, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = - sizeof(struct PUBLIC_CRYPTO_AES_OPERATION_STATE), + sizeof(struct tf_crypto_aes_operation_state), .cra_alignmask = 3, .cra_list 
= LIST_HEAD_INIT(smc_aes_cbc_alg.cra_list), .cra_u = { @@ -1096,7 +1280,7 @@ static struct crypto_alg smc_aes_ctr_alg = { .cra_type = &crypto_ablkcipher_type, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = - sizeof(struct PUBLIC_CRYPTO_AES_OPERATION_STATE), + sizeof(struct tf_crypto_aes_operation_state), .cra_alignmask = 3, .cra_list = LIST_HEAD_INIT(smc_aes_ctr_alg.cra_list), .cra_u = { @@ -1122,7 +1306,7 @@ int register_smc_public_crypto_aes(void) crypto_init_queue(&aes_ctx->queue, 1); tasklet_init(&aes_ctx->task, aes_tasklet, (unsigned long)aes_ctx); - spin_lock_init(&aes_ctx->lock); + spin_lock_init(&aes_ctx->lock); aes_ctx->dma_in = OMAP44XX_DMA_AES1_P_DATA_IN_REQ; aes_ctx->dma_out = OMAP44XX_DMA_AES1_P_DATA_OUT_REQ; @@ -1131,9 +1315,17 @@ int register_smc_public_crypto_aes(void) if (ret) goto err_dma; - ret = crypto_register_alg(&smc_aes_alg); + ret = crypto_register_alg(&smc_aes_ecb_sync_alg); if (ret) - goto err_dma; + goto err_ecb_sync; + + ret = crypto_register_alg(&smc_aes_cbc_sync_alg); + if (ret) + goto err_cbc_sync; + + ret = crypto_register_alg(&smc_aes_ctr_sync_alg); + if (ret) + goto err_ctr_sync; ret = crypto_register_alg(&smc_aes_ecb_alg); if (ret) @@ -1154,7 +1346,13 @@ err_ctr: err_cbc: crypto_unregister_alg(&smc_aes_ecb_alg); err_ecb: - crypto_unregister_alg(&smc_aes_alg); + crypto_unregister_alg(&smc_aes_ctr_sync_alg); +err_ctr_sync: + crypto_unregister_alg(&smc_aes_cbc_sync_alg); +err_cbc_sync: + crypto_unregister_alg(&smc_aes_ecb_sync_alg); +err_ecb_sync: + aes_dma_cleanup(aes_ctx); err_dma: tasklet_kill(&aes_ctx->task); kfree(aes_ctx); @@ -1166,15 +1364,17 @@ void unregister_smc_public_crypto_aes(void) if (aes_ctx == NULL) return; - crypto_unregister_alg(&smc_aes_alg); + crypto_unregister_alg(&smc_aes_ecb_sync_alg); + crypto_unregister_alg(&smc_aes_cbc_sync_alg); + crypto_unregister_alg(&smc_aes_ctr_sync_alg); + crypto_unregister_alg(&smc_aes_ecb_alg); crypto_unregister_alg(&smc_aes_cbc_alg); crypto_unregister_alg(&smc_aes_ctr_alg); - 
tasklet_kill(&aes_ctx->task); - aes_dma_cleanup(aes_ctx); + tasklet_kill(&aes_ctx->task); kfree(aes_ctx); } #endif diff --git a/security/smc/tf_crypto_des.c b/security/smc/tf_crypto_des.c new file mode 100644 index 0000000..716a60f --- /dev/null +++ b/security/smc/tf_crypto_des.c @@ -0,0 +1,404 @@ +/** + * Copyright (c) 2011 Trusted Logic S.A. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#include "tf_defs.h" +#include "tf_util.h" +#include "tf_crypto.h" +#include "tf_dma.h" + +#include <linux/io.h> +#include <mach/io.h> + +/* + * DES Hardware Accelerator: Base address + */ +#define DES_REGS_HW_ADDR 0x480A5000 + +/* + * CTRL register Masks + */ +#define DES_CTRL_OUTPUT_READY_BIT (1<<0) +#define DES_CTRL_INPUT_READY_BIT (1<<1) + +#define DES_CTRL_GET_DIRECTION(x) (x&4) +#define DES_CTRL_DIRECTION_DECRYPT 0 +#define DES_CTRL_DIRECTION_ENCRYPT (1<<2) + +#define DES_CTRL_GET_TDES(x) (x&8) +#define DES_CTRL_TDES_DES 0 +#define DES_CTRL_TDES_TRIPLE_DES (1<<3) + +#define DES_CTRL_GET_MODE(x) (x&0x10) +#define DES_CTRL_MODE_ECB 0 +#define DES_CTRL_MODE_CBC (1<<4) + +/* + * SYSCONFIG register masks + */ +#define DES_SYSCONFIG_DMA_REQ_IN_EN_BIT (1<<5) +#define DES_SYSCONFIG_DMA_REQ_OUT_EN_BIT (1<<6) + +/*------------------------------------------------------------------------*/ +/* DES/DES3 Context */ 
+/*------------------------------------------------------------------------*/ +/** + * This structure contains the registers of the DES HW accelerator. + */ +struct des3_des_reg { + u32 DES_KEY3_L; /* DES Key 3 Low Register */ + u32 DES_KEY3_H; /* DES Key 3 High Register */ + u32 DES_KEY2_L; /* DES Key 2 Low Register */ + u32 DES_KEY2_H; /* DES Key 2 High Register */ + u32 DES_KEY1_L; /* DES Key 1 Low Register */ + u32 DES_KEY1_H; /* DES Key 1 High Register */ + u32 DES_IV_L; /* DES Initialization Vector Low Reg */ + u32 DES_IV_H; /* DES Initialization Vector High Reg */ + u32 DES_CTRL; /* DES Control Register */ + u32 DES_LENGTH; /* DES Length Register */ + u32 DES_DATA_L; /* DES Data Input/Output Low Register */ + u32 DES_DATA_H; /* DES Data Input/Output High Register */ + u32 DES_REV; /* DES Revision Register */ + u32 DES_SYSCONFIG; /* DES Mask and Reset Register */ + u32 DES_SYSSTATUS; /* DES System Status Register */ +}; + +static struct des3_des_reg *des_reg; + +/*------------------------------------------------------------------------ + *Forward declarations + *------------------------------------------------------------------------ */ + +static bool tf_des_update_dma(u8 *src, u8 *dest, u32 nb_blocks); + +/*------------------------------------------------------------------------- + *Save HWA registers into the specified operation state structure + *-------------------------------------------------------------------------*/ +static void tf_des_save_registers(u32 DES_CTRL, + struct tf_crypto_des_operation_state *des_state) +{ + dprintk(KERN_INFO + "tf_des_save_registers in des_state=%p CTRL=0x%08x\n", + des_state, DES_CTRL); + + /*Save the IV if we are in CBC mode */ + if (DES_CTRL_GET_MODE(DES_CTRL) == DES_CTRL_MODE_CBC) { + des_state->DES_IV_L = INREG32(&des_reg->DES_IV_L); + des_state->DES_IV_H = INREG32(&des_reg->DES_IV_H); + } +} + +/*------------------------------------------------------------------------- + *Restore the HWA registers from the operation 
state structure + *-------------------------------------------------------------------------*/ +static void tf_des_restore_registers(u32 DES_CTRL, + struct tf_crypto_des_operation_state *des_state) +{ + dprintk(KERN_INFO "tf_des_restore_registers from " + "des_state=%p CTRL=0x%08x\n", + des_state, DES_CTRL); + + /*Write the IV ctx->reg */ + if (DES_CTRL_GET_MODE(DES_CTRL) == DES_CTRL_MODE_CBC) { + OUTREG32(&des_reg->DES_IV_L, des_state->DES_IV_L); + OUTREG32(&des_reg->DES_IV_H, des_state->DES_IV_H); + } + + /*Set the DIRECTION and CBC bits in the CTRL register. + *Keep the TDES from the accelerator */ + OUTREG32(&des_reg->DES_CTRL, + (INREG32(&des_reg->DES_CTRL) & (1 << 3)) | + (DES_CTRL & ((1 << 2) | (1 << 4)))); + + /*Set the SYSCONFIG register to 0 */ + OUTREG32(&des_reg->DES_SYSCONFIG, 0); +} + +/*------------------------------------------------------------------------- */ + +void tf_des_init(void) +{ + des_reg = omap_ioremap(DES_REGS_HW_ADDR, SZ_1M, MT_DEVICE); + if (des_reg == NULL) + panic("Unable to remap DES/3DES module"); +} + +void tf_des_exit(void) +{ + omap_iounmap(des_reg); +} + +bool tf_des_update(u32 DES_CTRL, + struct tf_crypto_des_operation_state *des_state, + u8 *src, u8 *dest, u32 nb_blocks) +{ + u32 nbr_of_blocks; + u32 temp; + u8 *process_src; + u8 *process_dest; + u32 dma_use = PUBLIC_CRYPTO_DMA_USE_NONE; + + /* + *Choice of the processing type + */ + if (nb_blocks * DES_BLOCK_SIZE >= DMA_TRIGGER_IRQ_DES) + dma_use = PUBLIC_CRYPTO_DMA_USE_IRQ; + + dprintk(KERN_INFO "tf_des_update: " + "src=0x%08x, dest=0x%08x, nb_blocks=0x%08x, dma_use=0x%08x\n", + (unsigned int)src, (unsigned int)dest, + (unsigned int)nb_blocks, (unsigned int)dma_use); + + if (nb_blocks == 0) { + dprintk(KERN_INFO "tf_des_update: Nothing to process\n"); + return true; + } + + if (DES_CTRL_GET_DIRECTION(INREG32(&des_reg->DES_CTRL)) != + DES_CTRL_GET_DIRECTION(DES_CTRL)) { + dprintk(KERN_WARNING "HWA configured for another direction\n"); + return false; + } + + /*Restore the 
registers of the accelerator from the operation state */ + tf_des_restore_registers(DES_CTRL, des_state); + + OUTREG32(&des_reg->DES_LENGTH, nb_blocks * DES_BLOCK_SIZE); + + if (dma_use == PUBLIC_CRYPTO_DMA_USE_IRQ) { + + /*perform the update with DMA */ + if (!tf_des_update_dma(src, dest, nb_blocks)) + return false; + + } else { + u8 buf[DMA_TRIGGER_IRQ_DES]; + + process_src = process_dest = buf; + + if (copy_from_user(buf, src, nb_blocks * DES_BLOCK_SIZE)) + return false; + + for (nbr_of_blocks = 0; + nbr_of_blocks < nb_blocks; nbr_of_blocks++) { + + /*We wait for the input ready */ + /*Crash the system as this should never occur */ + if (tf_crypto_wait_for_ready_bit( + (u32 *)&des_reg->DES_CTRL, + DES_CTRL_INPUT_READY_BIT) != + PUBLIC_CRYPTO_OPERATION_SUCCESS) { + panic("Wait too long for DES HW " + "accelerator Input data to be ready\n"); + } + + /*We copy the 8 bytes of data src->reg */ + temp = (u32) BYTES_TO_LONG(process_src); + OUTREG32(&des_reg->DES_DATA_L, temp); + process_src += 4; + temp = (u32) BYTES_TO_LONG(process_src); + OUTREG32(&des_reg->DES_DATA_H, temp); + process_src += 4; + + /*We wait for the output ready */ + tf_crypto_wait_for_ready_bit_infinitely( + (u32 *)&des_reg->DES_CTRL, + DES_CTRL_OUTPUT_READY_BIT); + + /*We copy the 8 bytes of data reg->dest */ + temp = INREG32(&des_reg->DES_DATA_L); + LONG_TO_BYTE(process_dest, temp); + process_dest += 4; + temp = INREG32(&des_reg->DES_DATA_H); + LONG_TO_BYTE(process_dest, temp); + process_dest += 4; + } + + if (copy_to_user(dest, buf, nb_blocks * DES_BLOCK_SIZE)) + return false; + } + + /*Save the accelerator registers into the operation state */ + tf_des_save_registers(DES_CTRL, des_state); + + dprintk(KERN_INFO "tf_des_update: Done\n"); + return true; +} + +/*------------------------------------------------------------------------- */ +/* + *Static function, perform DES encryption/decryption using the DMA for data + *transfer. 
+ * + *inputs: src : pointer of the input data to process + * nb_blocks : number of block to process + * dma_use : PUBLIC_CRYPTO_DMA_USE_IRQ (use irq to monitor end of DMA) + *output: dest : pointer of the output data (can be eq to src) + */ +static bool tf_des_update_dma(u8 *src, u8 *dest, u32 nb_blocks) +{ + /* + *Note: The DMA only sees physical addresses ! + */ + + int dma_ch0; + int dma_ch1; + struct omap_dma_channel_params ch0_parameters; + struct omap_dma_channel_params ch1_parameters; + u32 length = nb_blocks * DES_BLOCK_SIZE; + u32 length_loop = 0; + u32 nb_blocksLoop = 0; + struct tf_device *dev = tf_get_device(); + + dprintk(KERN_INFO + "tf_des_update_dma: In=0x%08x, Out=0x%08x, Len=%u\n", + (unsigned int)src, (unsigned int)dest, + (unsigned int)length); + + /*lock the DMA */ + mutex_lock(&dev->sm.dma_mutex); + + if (tf_dma_request(&dma_ch0) != PUBLIC_CRYPTO_OPERATION_SUCCESS) { + mutex_unlock(&dev->sm.dma_mutex); + return false; + } + if (tf_dma_request(&dma_ch1) != PUBLIC_CRYPTO_OPERATION_SUCCESS) { + omap_free_dma(dma_ch0); + mutex_unlock(&dev->sm.dma_mutex); + return false; + } + + while (length > 0) { + + /* + * At this time, we are sure that the DMAchannels are available + * and not used by other public crypto operation + */ + + /*DMA used for Input and Output */ + OUTREG32(&des_reg->DES_SYSCONFIG, + INREG32(&des_reg->DES_SYSCONFIG) + | DES_SYSCONFIG_DMA_REQ_OUT_EN_BIT + | DES_SYSCONFIG_DMA_REQ_IN_EN_BIT); + + /* Check length */ + if (length <= dev->dma_buffer_length) + length_loop = length; + else + length_loop = dev->dma_buffer_length; + + /* The length is always a multiple of the block size */ + nb_blocksLoop = length_loop / DES_BLOCK_SIZE; + + /* + * Copy the data from the user input buffer into a preallocated + * buffer which has correct properties from efficient DMA + * transfers. 
+ */ + if (copy_from_user(dev->dma_buffer, src, length_loop)) { + omap_free_dma(dma_ch0); + omap_free_dma(dma_ch1); + mutex_unlock(&dev->sm.dma_mutex); + return false; + } + + /* DMA1: Mem -> DES */ + tf_dma_set_channel_common_params(&ch0_parameters, + nb_blocksLoop, + DMA_CEN_Elts_per_Frame_DES, + DES_REGS_HW_ADDR + 0x28, + dev->dma_buffer_phys, + OMAP44XX_DMA_DES_P_DATA_IN_REQ); + + ch0_parameters.src_amode = OMAP_DMA_AMODE_POST_INC; + ch0_parameters.dst_amode = OMAP_DMA_AMODE_CONSTANT; + ch0_parameters.src_or_dst_synch = OMAP_DMA_DST_SYNC; + + dprintk(KERN_INFO + "tf_des_update_dma: omap_set_dma_params(ch0)\n"); + omap_set_dma_params(dma_ch0, &ch0_parameters); + + /* DMA2: DES -> Mem */ + tf_dma_set_channel_common_params(&ch1_parameters, + nb_blocksLoop, + DMA_CEN_Elts_per_Frame_DES, + dev->dma_buffer_phys, + DES_REGS_HW_ADDR + 0x28, + OMAP44XX_DMA_DES_P_DATA_OUT_REQ); + + ch1_parameters.src_amode = OMAP_DMA_AMODE_CONSTANT; + ch1_parameters.dst_amode = OMAP_DMA_AMODE_POST_INC; + ch1_parameters.src_or_dst_synch = OMAP_DMA_SRC_SYNC; + + dprintk(KERN_INFO "tf_des_update_dma: " + "omap_set_dma_params(ch1)\n"); + omap_set_dma_params(dma_ch1, &ch1_parameters); + + wmb(); + + dprintk(KERN_INFO + "tf_des_update_dma: Start DMA channel %d\n", + (unsigned int)dma_ch0); + tf_dma_start(dma_ch0, OMAP_DMA_BLOCK_IRQ); + + dprintk(KERN_INFO + "tf_des_update_dma: Start DMA channel %d\n", + (unsigned int)dma_ch1); + tf_dma_start(dma_ch1, OMAP_DMA_BLOCK_IRQ); + tf_dma_wait(2); + + /* Unset DMA synchronisation requests */ + OUTREG32(&des_reg->DES_SYSCONFIG, + INREG32(&des_reg->DES_SYSCONFIG) + & (~DES_SYSCONFIG_DMA_REQ_OUT_EN_BIT) + & (~DES_SYSCONFIG_DMA_REQ_IN_EN_BIT)); + + omap_clear_dma(dma_ch0); + omap_clear_dma(dma_ch1); + + /* + * The dma transfer is complete + */ + + /*The DMA output is in the preallocated aligned buffer + *and needs to be copied to the output buffer.*/ + if (copy_to_user(dest, dev->dma_buffer, length_loop)) { + omap_free_dma(dma_ch0); + 
omap_free_dma(dma_ch1); + mutex_unlock(&dev->sm.dma_mutex); + return false; + } + + src += length_loop; + dest += length_loop; + length -= length_loop; + } + + /* For safety reasons, let's clean the working buffer */ + memset(dev->dma_buffer, 0, length_loop); + + /* Release the DMA */ + omap_free_dma(dma_ch0); + omap_free_dma(dma_ch1); + + mutex_unlock(&dev->sm.dma_mutex); + + dprintk(KERN_INFO "tf_des_update_dma: Success\n"); + + return true; +} diff --git a/security/smc/omap4/scx_public_crypto_Digest.c b/security/smc/tf_crypto_digest.c index 7a40089..d69a97f 100644 --- a/security/smc/omap4/scx_public_crypto_Digest.c +++ b/security/smc/tf_crypto_digest.c @@ -1,5 +1,5 @@ -/* - * Copyright (c) 2006-2010 Trusted Logic S.A. +/** + * Copyright (c) 2011 Trusted Logic S.A. * All Rights Reserved. * * This program is free software; you can redistribute it and/or @@ -17,11 +17,11 @@ * MA 02111-1307 USA */ -#include "scxlnx_defs.h" -#include "scxlnx_util.h" -#include "scx_public_crypto.h" -#include "scx_public_dma.h" -#include "scxlnx_mshield.h" +#include "tf_defs.h" +#include "tf_util.h" +#include "tf_crypto.h" +#include "tf_dma.h" +#include "tf_zebra.h" #include <linux/io.h> #include <mach/io.h> @@ -65,7 +65,7 @@ /** * This structure contains the registers of the SHA1/MD5 HW accelerator. 
*/ -struct Sha1Md5Reg_t { +struct sha1_md5_reg { u32 ODIGEST_A; /* 0x00 Outer Digest A */ u32 ODIGEST_B; /* 0x04 Outer Digest B */ u32 ODIGEST_C; /* 0x08 Outer Digest C */ @@ -117,7 +117,7 @@ struct Sha1Md5Reg_t { u32 IRQENABLE; /* 0x11C IRQ Enable */ }; -static struct Sha1Md5Reg_t *pSha1Md5Reg_t; +static struct sha1_md5_reg *sha1_md5_reg; static const u8 md5OverEmptyString[] = { 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04, @@ -148,111 +148,112 @@ static const u8 sha256OverEmptyString[] = { *Forward declarations *------------------------------------------------------------------------- */ -static void static_Hash_HwPerform64bDigest(u32 *pData, - u32 nAlgo, u32 nBytesProcessed); -static void static_Hash_HwPerformDmaDigest(u8 *pData, u32 nDataLength, - u32 nAlgo, u32 nBytesProcessed); +static void tf_digest_hw_perform_64b(u32 *data, + u32 algo, u32 bytes_processed); +static bool tf_digest_hw_perform_dma(u8 *data, u32 nDataLength, + u32 algo, u32 bytes_processed); -static void PDrvCryptoUpdateHashWithDMA( - struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *pSHAState, - u8 *pData, u32 dataLength); +static bool tf_digest_update_dma( + struct tf_crypto_sha_operation_state *sha_state, + u8 *data, u32 data_length); /*------------------------------------------------------------------------- *Save HWA registers into the specified operation state structure *------------------------------------------------------------------------*/ -static void PDrvCryptoSaveHashRegisters( - struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *pSHAState) +static void tf_digest_save_registers( + struct tf_crypto_sha_operation_state *sha_state) { - dprintk(KERN_INFO "PDrvCryptoSaveHashRegisters: State=%p\n", - pSHAState); - - pSHAState->SHA_DIGEST_A = INREG32(&pSha1Md5Reg_t->IDIGEST_A); - pSHAState->SHA_DIGEST_B = INREG32(&pSha1Md5Reg_t->IDIGEST_B); - pSHAState->SHA_DIGEST_C = INREG32(&pSha1Md5Reg_t->IDIGEST_C); - pSHAState->SHA_DIGEST_D = INREG32(&pSha1Md5Reg_t->IDIGEST_D); - pSHAState->SHA_DIGEST_E = 
INREG32(&pSha1Md5Reg_t->IDIGEST_E); - pSHAState->SHA_DIGEST_F = INREG32(&pSha1Md5Reg_t->IDIGEST_F); - pSHAState->SHA_DIGEST_G = INREG32(&pSha1Md5Reg_t->IDIGEST_G); - pSHAState->SHA_DIGEST_H = INREG32(&pSha1Md5Reg_t->IDIGEST_H); + dprintk(KERN_INFO "tf_digest_save_registers: State=%p\n", + sha_state); + + sha_state->SHA_DIGEST_A = INREG32(&sha1_md5_reg->IDIGEST_A); + sha_state->SHA_DIGEST_B = INREG32(&sha1_md5_reg->IDIGEST_B); + sha_state->SHA_DIGEST_C = INREG32(&sha1_md5_reg->IDIGEST_C); + sha_state->SHA_DIGEST_D = INREG32(&sha1_md5_reg->IDIGEST_D); + sha_state->SHA_DIGEST_E = INREG32(&sha1_md5_reg->IDIGEST_E); + sha_state->SHA_DIGEST_F = INREG32(&sha1_md5_reg->IDIGEST_F); + sha_state->SHA_DIGEST_G = INREG32(&sha1_md5_reg->IDIGEST_G); + sha_state->SHA_DIGEST_H = INREG32(&sha1_md5_reg->IDIGEST_H); } /*------------------------------------------------------------------------- *Restore the HWA registers from the operation state structure *-------------------------------------------------------------------------*/ -static void PDrvCryptoRestoreHashRegisters( - struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *pSHAState) +static void tf_digest_restore_registers( + struct tf_crypto_sha_operation_state *sha_state) { - dprintk(KERN_INFO "PDrvCryptoRestoreHashRegisters: State=%p\n", - pSHAState); + dprintk(KERN_INFO "tf_digest_restore_registers: State=%p\n", + sha_state); - if (pSHAState->nBytesProcessed != 0) { + if (sha_state->bytes_processed != 0) { /* * Some bytes were already processed. 
Initialize * previous digest */ - OUTREG32(&pSha1Md5Reg_t->IDIGEST_A, pSHAState->SHA_DIGEST_A); - OUTREG32(&pSha1Md5Reg_t->IDIGEST_B, pSHAState->SHA_DIGEST_B); - OUTREG32(&pSha1Md5Reg_t->IDIGEST_C, pSHAState->SHA_DIGEST_C); - OUTREG32(&pSha1Md5Reg_t->IDIGEST_D, pSHAState->SHA_DIGEST_D); - OUTREG32(&pSha1Md5Reg_t->IDIGEST_E, pSHAState->SHA_DIGEST_E); - OUTREG32(&pSha1Md5Reg_t->IDIGEST_F, pSHAState->SHA_DIGEST_F); - OUTREG32(&pSha1Md5Reg_t->IDIGEST_G, pSHAState->SHA_DIGEST_G); - OUTREG32(&pSha1Md5Reg_t->IDIGEST_H, pSHAState->SHA_DIGEST_H); + OUTREG32(&sha1_md5_reg->IDIGEST_A, sha_state->SHA_DIGEST_A); + OUTREG32(&sha1_md5_reg->IDIGEST_B, sha_state->SHA_DIGEST_B); + OUTREG32(&sha1_md5_reg->IDIGEST_C, sha_state->SHA_DIGEST_C); + OUTREG32(&sha1_md5_reg->IDIGEST_D, sha_state->SHA_DIGEST_D); + OUTREG32(&sha1_md5_reg->IDIGEST_E, sha_state->SHA_DIGEST_E); + OUTREG32(&sha1_md5_reg->IDIGEST_F, sha_state->SHA_DIGEST_F); + OUTREG32(&sha1_md5_reg->IDIGEST_G, sha_state->SHA_DIGEST_G); + OUTREG32(&sha1_md5_reg->IDIGEST_H, sha_state->SHA_DIGEST_H); } - OUTREG32(&pSha1Md5Reg_t->SYSCONFIG, 0); + OUTREG32(&sha1_md5_reg->SYSCONFIG, 0); } /*------------------------------------------------------------------------- */ -void PDrvCryptoDigestInit(void) +void tf_digest_init(void) { - pSha1Md5Reg_t = omap_ioremap(DIGEST1_REGS_HW_ADDR, SZ_1M, MT_DEVICE); - if (pSha1Md5Reg_t == NULL) + sha1_md5_reg = omap_ioremap(DIGEST1_REGS_HW_ADDR, SZ_1M, MT_DEVICE); + if (sha1_md5_reg == NULL) panic("Unable to remap SHA2/MD5 module"); } -void PDrvCryptoDigestExit(void) +void tf_digest_exit(void) { - omap_iounmap(pSha1Md5Reg_t); + omap_iounmap(sha1_md5_reg); } -void PDrvCryptoUpdateHash(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *pSHAState, - u8 *pData, u32 dataLength) +bool tf_digest_update(struct tf_crypto_sha_operation_state *sha_state, + u8 *data, u32 data_length) { - u32 dmaUse = PUBLIC_CRYPTO_DMA_USE_NONE; + u32 dma_use = PUBLIC_CRYPTO_DMA_USE_NONE; /* *Choice of the processing type */ - if (dataLength 
>= DMA_TRIGGER_IRQ_DIGEST) - dmaUse = PUBLIC_CRYPTO_DMA_USE_IRQ; + if (data_length >= DMA_TRIGGER_IRQ_DIGEST) + dma_use = PUBLIC_CRYPTO_DMA_USE_IRQ; - dprintk(KERN_INFO "PDrvCryptoUpdateHash : "\ - "Data=0x%08x/%u, Chunk=%u, Processed=%u, dmaUse=0x%08x\n", - (u32)pData, (u32)dataLength, - pSHAState->nChunkLength, pSHAState->nBytesProcessed, - dmaUse); + dprintk(KERN_INFO "tf_digest_update : "\ + "Data=0x%08x/%u, Chunk=%u, Processed=%u, dma_use=0x%08x\n", + (u32)data, (u32)data_length, + sha_state->chunk_length, sha_state->bytes_processed, + dma_use); - if (dataLength == 0) { - dprintk(KERN_INFO "PDrvCryptoUpdateHash: "\ + if (data_length == 0) { + dprintk(KERN_INFO "tf_digest_update: "\ "Nothing to process\n"); - return; + return true; } - if (dmaUse != PUBLIC_CRYPTO_DMA_USE_NONE) { + if (dma_use != PUBLIC_CRYPTO_DMA_USE_NONE) { /* * Restore the registers of the accelerator from the operation * state */ - PDrvCryptoRestoreHashRegisters(pSHAState); + tf_digest_restore_registers(sha_state); /*perform the updates with DMA */ - PDrvCryptoUpdateHashWithDMA(pSHAState, pData, dataLength); + if (!tf_digest_update_dma(sha_state, data, data_length)) + return false; /* Save the accelerator registers into the operation state */ - PDrvCryptoSaveHashRegisters(pSHAState); + tf_digest_save_registers(sha_state); } else { /*Non-DMA transfer */ @@ -264,63 +265,66 @@ void PDrvCryptoUpdateHash(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *pSHAState, /*Is there any data in the chunk? If yes is it possible to *make a 64B buffer with the new data passed ? 
*/ - if ((pSHAState->nChunkLength != 0) - && (pSHAState->nChunkLength + dataLength >= + if ((sha_state->chunk_length != 0) + && (sha_state->chunk_length + data_length >= HASH_BLOCK_BYTES_LENGTH)) { u8 vLengthToComplete = - HASH_BLOCK_BYTES_LENGTH - pSHAState->nChunkLength; + HASH_BLOCK_BYTES_LENGTH - sha_state->chunk_length; /*So we fill the chunk buffer with the new data to *complete to 64B */ - memcpy(pSHAState->pChunkBuffer + pSHAState-> - nChunkLength, pData, vLengthToComplete); + if (copy_from_user( + sha_state->chunk_buffer+sha_state->chunk_length, + data, + vLengthToComplete)) + return false; - if (pSHAState->nChunkLength + dataLength == + if (sha_state->chunk_length + data_length == HASH_BLOCK_BYTES_LENGTH) { /*We'll keep some data for the final */ - pSHAState->nChunkLength = + sha_state->chunk_length = HASH_BLOCK_BYTES_LENGTH; - dprintk(KERN_INFO "PDrvCryptoUpdateHash: "\ + dprintk(KERN_INFO "tf_digest_update: "\ "Done: Chunk=%u; Processed=%u\n", - pSHAState->nChunkLength, - pSHAState->nBytesProcessed); - return; + sha_state->chunk_length, + sha_state->bytes_processed); + return true; } /* * Restore the registers of the accelerator from the * operation state */ - PDrvCryptoRestoreHashRegisters(pSHAState); + tf_digest_restore_registers(sha_state); /*Then we send this buffer to the HWA */ - static_Hash_HwPerform64bDigest( - (u32 *)pSHAState->pChunkBuffer, pSHAState->CTRL, - pSHAState->nBytesProcessed); + tf_digest_hw_perform_64b( + (u32 *)sha_state->chunk_buffer, sha_state->CTRL, + sha_state->bytes_processed); /* * Save the accelerator registers into the operation * state */ - PDrvCryptoSaveHashRegisters(pSHAState); + tf_digest_save_registers(sha_state); - pSHAState->nBytesProcessed = - INREG32(&pSha1Md5Reg_t->DIGEST_COUNT); + sha_state->bytes_processed = + INREG32(&sha1_md5_reg->DIGEST_COUNT); /*We have flushed the chunk so it is empty now */ - pSHAState->nChunkLength = 0; + sha_state->chunk_length = 0; /*Then we have less data to process */ - pData += 
vLengthToComplete; - dataLength -= vLengthToComplete; + data += vLengthToComplete; + data_length -= vLengthToComplete; } /*(2)We process all the 64B buffer that we can */ - if (pSHAState->nChunkLength + dataLength >= + if (sha_state->chunk_length + data_length >= HASH_BLOCK_BYTES_LENGTH) { - while (dataLength > HASH_BLOCK_BYTES_LENGTH) { + while (data_length > HASH_BLOCK_BYTES_LENGTH) { u8 pTempAlignedBuffer[HASH_BLOCK_BYTES_LENGTH]; /* @@ -328,71 +332,79 @@ void PDrvCryptoUpdateHash(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *pSHAState, */ /*We copy the data to process to an aligned *buffer */ - memcpy(pTempAlignedBuffer, pData, - HASH_BLOCK_BYTES_LENGTH); + if (copy_from_user( + pTempAlignedBuffer, + data, + HASH_BLOCK_BYTES_LENGTH)) + return false; /*Then we send this buffer to the hash *hardware */ - PDrvCryptoRestoreHashRegisters(pSHAState); - static_Hash_HwPerform64bDigest( + tf_digest_restore_registers(sha_state); + tf_digest_hw_perform_64b( (u32 *) pTempAlignedBuffer, - pSHAState->CTRL, - pSHAState->nBytesProcessed); - PDrvCryptoSaveHashRegisters(pSHAState); + sha_state->CTRL, + sha_state->bytes_processed); + tf_digest_save_registers(sha_state); - pSHAState->nBytesProcessed = - INREG32(&pSha1Md5Reg_t->DIGEST_COUNT); + sha_state->bytes_processed = + INREG32(&sha1_md5_reg->DIGEST_COUNT); /*Then we decrease the remaining data of 64B */ - pData += HASH_BLOCK_BYTES_LENGTH; - dataLength -= HASH_BLOCK_BYTES_LENGTH; + data += HASH_BLOCK_BYTES_LENGTH; + data_length -= HASH_BLOCK_BYTES_LENGTH; } } /*(3)We look if we have some data that could not be processed *yet because it is not large enough to fill a buffer of 64B */ - if (dataLength > 0) { - if (pSHAState->nChunkLength + dataLength > + if (data_length > 0) { + if (sha_state->chunk_length + data_length > HASH_BLOCK_BYTES_LENGTH) { /*Should never be in this case !!! 
*/ - panic("PDrvCryptoUpdateHash: nChunkLength + \ - dataLength > HASH_BLOCK_BYTES_LENGTH\n"); + panic("tf_digest_update: chunk_length data_length > " + "HASH_BLOCK_BYTES_LENGTH\n"); } /*So we fill the chunk buffer with the new data to *complete to 64B */ - memcpy(pSHAState->pChunkBuffer + pSHAState-> - nChunkLength, pData, dataLength); - pSHAState->nChunkLength += dataLength; + if (copy_from_user( + sha_state->chunk_buffer+sha_state->chunk_length, + data, + data_length)) + return false; + sha_state->chunk_length += data_length; } } - dprintk(KERN_INFO "PDrvCryptoUpdateHash: Done: "\ + dprintk(KERN_INFO "tf_digest_update: Done: "\ "Chunk=%u; Processed=%u\n", - pSHAState->nChunkLength, pSHAState->nBytesProcessed); + sha_state->chunk_length, sha_state->bytes_processed); + + return true; } /*------------------------------------------------------------------------- */ -static void static_Hash_HwPerform64bDigest(u32 *pData, - u32 nAlgo, u32 nBytesProcessed) +static void tf_digest_hw_perform_64b(u32 *data, + u32 algo, u32 bytes_processed) { - u32 nAlgoConstant = 0; + u32 algo_constant = 0; - OUTREG32(&pSha1Md5Reg_t->DIGEST_COUNT, nBytesProcessed); + OUTREG32(&sha1_md5_reg->DIGEST_COUNT, bytes_processed); - if (nBytesProcessed == 0) { + if (bytes_processed == 0) { /* No bytes processed so far. 
Will use the algo constant instead of previous digest */ - nAlgoConstant = 1 << 3; + algo_constant = 1 << 3; } - OUTREG32(&pSha1Md5Reg_t->MODE, - nAlgoConstant | (nAlgo & 0x6)); - OUTREG32(&pSha1Md5Reg_t->LENGTH, HASH_BLOCK_BYTES_LENGTH); + OUTREG32(&sha1_md5_reg->MODE, + algo_constant | (algo & 0x6)); + OUTREG32(&sha1_md5_reg->LENGTH, HASH_BLOCK_BYTES_LENGTH); - if (SCXPublicCryptoWaitForReadyBit( - (u32 *)&pSha1Md5Reg_t->IRQSTATUS, + if (tf_crypto_wait_for_ready_bit( + (u32 *)&sha1_md5_reg->IRQSTATUS, DIGEST_IRQSTATUS_INPUT_READY_BIT) != PUBLIC_CRYPTO_OPERATION_SUCCESS) { /* Crash the system as this should never occur */ @@ -401,37 +413,37 @@ static void static_Hash_HwPerform64bDigest(u32 *pData, } /* - *The pData buffer is a buffer of 64 bytes. + *The data buffer is a buffer of 64 bytes. */ - OUTREG32(&pSha1Md5Reg_t->DIN_0, pData[0]); - OUTREG32(&pSha1Md5Reg_t->DIN_1, pData[1]); - OUTREG32(&pSha1Md5Reg_t->DIN_2, pData[2]); - OUTREG32(&pSha1Md5Reg_t->DIN_3, pData[3]); - OUTREG32(&pSha1Md5Reg_t->DIN_4, pData[4]); - OUTREG32(&pSha1Md5Reg_t->DIN_5, pData[5]); - OUTREG32(&pSha1Md5Reg_t->DIN_6, pData[6]); - OUTREG32(&pSha1Md5Reg_t->DIN_7, pData[7]); - OUTREG32(&pSha1Md5Reg_t->DIN_8, pData[8]); - OUTREG32(&pSha1Md5Reg_t->DIN_9, pData[9]); - OUTREG32(&pSha1Md5Reg_t->DIN_10, pData[10]); - OUTREG32(&pSha1Md5Reg_t->DIN_11, pData[11]); - OUTREG32(&pSha1Md5Reg_t->DIN_12, pData[12]); - OUTREG32(&pSha1Md5Reg_t->DIN_13, pData[13]); - OUTREG32(&pSha1Md5Reg_t->DIN_14, pData[14]); - OUTREG32(&pSha1Md5Reg_t->DIN_15, pData[15]); + OUTREG32(&sha1_md5_reg->DIN_0, data[0]); + OUTREG32(&sha1_md5_reg->DIN_1, data[1]); + OUTREG32(&sha1_md5_reg->DIN_2, data[2]); + OUTREG32(&sha1_md5_reg->DIN_3, data[3]); + OUTREG32(&sha1_md5_reg->DIN_4, data[4]); + OUTREG32(&sha1_md5_reg->DIN_5, data[5]); + OUTREG32(&sha1_md5_reg->DIN_6, data[6]); + OUTREG32(&sha1_md5_reg->DIN_7, data[7]); + OUTREG32(&sha1_md5_reg->DIN_8, data[8]); + OUTREG32(&sha1_md5_reg->DIN_9, data[9]); + 
OUTREG32(&sha1_md5_reg->DIN_10, data[10]); + OUTREG32(&sha1_md5_reg->DIN_11, data[11]); + OUTREG32(&sha1_md5_reg->DIN_12, data[12]); + OUTREG32(&sha1_md5_reg->DIN_13, data[13]); + OUTREG32(&sha1_md5_reg->DIN_14, data[14]); + OUTREG32(&sha1_md5_reg->DIN_15, data[15]); /* *Wait until the hash operation is finished. */ - SCXPublicCryptoWaitForReadyBitInfinitely( - (u32 *)&pSha1Md5Reg_t->IRQSTATUS, + tf_crypto_wait_for_ready_bit_infinitely( + (u32 *)&sha1_md5_reg->IRQSTATUS, DIGEST_IRQSTATUS_OUTPUT_READY_BIT); } /*------------------------------------------------------------------------- */ -static void static_Hash_HwPerformDmaDigest(u8 *pData, u32 nDataLength, - u32 nAlgo, u32 nBytesProcessed) +static bool tf_digest_hw_perform_dma(u8 *data, u32 nDataLength, + u32 algo, u32 bytes_processed) { /* *Note: The DMA only sees physical addresses ! @@ -439,50 +451,53 @@ static void static_Hash_HwPerformDmaDigest(u8 *pData, u32 nDataLength, int dma_ch0; struct omap_dma_channel_params ch0_parameters; - u32 nLengthLoop = 0; - u32 nAlgoConstant; - struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice(); + u32 length_loop = 0; + u32 algo_constant; + struct tf_device *dev = tf_get_device(); dprintk(KERN_INFO - "static_Hash_HwPerformDmaDigest: Buffer=0x%08x/%u\n", - (u32)pData, (u32)nDataLength); + "tf_digest_hw_perform_dma: Buffer=0x%08x/%u\n", + (u32)data, (u32)nDataLength); /*lock the DMA */ - mutex_lock(&pDevice->sm.sDMALock); - if (scxPublicDMARequest(&dma_ch0) != PUBLIC_CRYPTO_OPERATION_SUCCESS) { - mutex_unlock(&pDevice->sm.sDMALock); - return; + mutex_lock(&dev->sm.dma_mutex); + if (tf_dma_request(&dma_ch0) != PUBLIC_CRYPTO_OPERATION_SUCCESS) { + mutex_unlock(&dev->sm.dma_mutex); + return false; } while (nDataLength > 0) { - nAlgoConstant = 0; - if (nBytesProcessed == 0) { + algo_constant = 0; + if (bytes_processed == 0) { /*No bytes processed so far. 
Will use the algo *constant instead of previous digest */ - nAlgoConstant = 1 << 3; + algo_constant = 1 << 3; } /*check length */ - if (nDataLength <= pDevice->nDMABufferLength) - nLengthLoop = nDataLength; + if (nDataLength <= dev->dma_buffer_length) + length_loop = nDataLength; else - nLengthLoop = pDevice->nDMABufferLength; + length_loop = dev->dma_buffer_length; /* - *Copy the data from the input buffer into a preallocated - *buffer which is aligned on the beginning of a page. - *This may prevent potential issues when flushing/invalidating - *the buffer as the cache lines are 64 bytes long. + * Copy the data from the user input buffer into a preallocated + * buffer which has correct properties from efficient DMA + * transfers. */ - memcpy(pDevice->pDMABuffer, pData, nLengthLoop); + if (copy_from_user(dev->dma_buffer, data, length_loop)) { + omap_free_dma(dma_ch0); + mutex_unlock(&dev->sm.dma_mutex); + return false; + } /*DMA1: Mem -> HASH */ - scxPublicSetDMAChannelCommonParams(&ch0_parameters, - nLengthLoop / HASH_BLOCK_BYTES_LENGTH, + tf_dma_set_channel_common_params(&ch0_parameters, + length_loop / HASH_BLOCK_BYTES_LENGTH, DMA_CEN_Elts_per_Frame_SHA, DIGEST1_REGS_HW_ADDR + 0x80, - pDevice->pDMABufferPhys, + dev->dma_buffer_phys, OMAP44XX_DMA_SHA2_DIN_P); /*specific for Mem -> HWA */ @@ -490,55 +505,57 @@ static void static_Hash_HwPerformDmaDigest(u8 *pData, u32 nDataLength, ch0_parameters.dst_amode = OMAP_DMA_AMODE_CONSTANT; ch0_parameters.src_or_dst_synch = OMAP_DMA_DST_SYNC; - scxPublicDMASetParams(dma_ch0, &ch0_parameters); + omap_set_dma_params(dma_ch0, &ch0_parameters); omap_set_dma_src_burst_mode(dma_ch0, OMAP_DMA_DATA_BURST_16); omap_set_dma_dest_burst_mode(dma_ch0, OMAP_DMA_DATA_BURST_16); - OUTREG32(&pSha1Md5Reg_t->DIGEST_COUNT, nBytesProcessed); - OUTREG32(&pSha1Md5Reg_t->MODE, - nAlgoConstant | (nAlgo & 0x6)); + OUTREG32(&sha1_md5_reg->DIGEST_COUNT, bytes_processed); + OUTREG32(&sha1_md5_reg->MODE, + algo_constant | (algo & 0x6)); /* * Triggers 
operation * Interrupt, Free Running + GO (DMA on) */ - OUTREG32(&pSha1Md5Reg_t->SYSCONFIG, - INREG32(&pSha1Md5Reg_t->SYSCONFIG) | + OUTREG32(&sha1_md5_reg->SYSCONFIG, + INREG32(&sha1_md5_reg->SYSCONFIG) | DIGEST_SYSCONFIG_PDMA_EN_BIT); - OUTREG32(&pSha1Md5Reg_t->LENGTH, nLengthLoop); + OUTREG32(&sha1_md5_reg->LENGTH, length_loop); wmb(); - scxPublicDMAStart(dma_ch0, OMAP_DMA_BLOCK_IRQ); + tf_dma_start(dma_ch0, OMAP_DMA_BLOCK_IRQ); - scxPublicDMAWait(1); + tf_dma_wait(1); - OUTREG32(&pSha1Md5Reg_t->SYSCONFIG, 0); + OUTREG32(&sha1_md5_reg->SYSCONFIG, 0); - scxPublicDMAClearChannel(dma_ch0); + omap_clear_dma(dma_ch0); - pData += nLengthLoop; - nDataLength -= nLengthLoop; - nBytesProcessed = - INREG32(&pSha1Md5Reg_t->DIGEST_COUNT); + data += length_loop; + nDataLength -= length_loop; + bytes_processed = + INREG32(&sha1_md5_reg->DIGEST_COUNT); } /*For safety reasons, let's clean the working buffer */ - memset(pDevice->pDMABuffer, 0, nLengthLoop); + memset(dev->dma_buffer, 0, length_loop); /*release the DMA */ - scxPublicDMARelease(dma_ch0); + omap_free_dma(dma_ch0); - mutex_unlock(&pDevice->sm.sDMALock); + mutex_unlock(&dev->sm.dma_mutex); /* * The dma transfert is finished, now wait until the hash * operation is finished. */ - SCXPublicCryptoWaitForReadyBitInfinitely( - (u32 *)&pSha1Md5Reg_t->IRQSTATUS, + tf_crypto_wait_for_ready_bit_infinitely( + (u32 *)&sha1_md5_reg->IRQSTATUS, DIGEST_IRQSTATUS_CONTEXT_READY_BIT); + + return true; } /*------------------------------------------------------------------------- */ @@ -546,96 +563,101 @@ static void static_Hash_HwPerformDmaDigest(u8 *pData, u32 nDataLength, *Static function, perform data digest using the DMA for data transfer. 
* *inputs: - * pData : pointer of the input data to process - * dataLength : number of byte to process + * data : pointer of the input data to process + * data_length : number of byte to process */ -static void PDrvCryptoUpdateHashWithDMA( - struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *pSHAState, - u8 *pData, u32 dataLength) +static bool tf_digest_update_dma( + struct tf_crypto_sha_operation_state *sha_state, + u8 *data, u32 data_length) { - dprintk(KERN_INFO "PDrvCryptoUpdateHashWithDMA\n"); + dprintk(KERN_INFO "tf_digest_update_dma\n"); - if (pSHAState->nChunkLength != 0) { + if (sha_state->chunk_length != 0) { u32 vLengthToComplete; /*Fill the chunk first */ - if (pSHAState-> - nChunkLength + dataLength <= HASH_BLOCK_BYTES_LENGTH) { + if (sha_state-> + chunk_length + data_length <= HASH_BLOCK_BYTES_LENGTH) { /*So we fill the chunk buffer with the new data */ - memcpy(pSHAState-> - pChunkBuffer + pSHAState->nChunkLength, - pData, dataLength); - pSHAState->nChunkLength += dataLength; + if (copy_from_user(sha_state->chunk_buffer + + sha_state->chunk_length, data, + data_length)) + return false; + sha_state->chunk_length += data_length; /*We'll keep some data for the final */ - return; + return true; } - vLengthToComplete = HASH_BLOCK_BYTES_LENGTH - pSHAState-> - nChunkLength; + vLengthToComplete = HASH_BLOCK_BYTES_LENGTH - sha_state-> + chunk_length; if (vLengthToComplete != 0) { /*So we fill the chunk buffer with the new data to *complete to 64B */ - memcpy(pSHAState->pChunkBuffer + pSHAState-> - nChunkLength, pData, vLengthToComplete); + if (copy_from_user(sha_state->chunk_buffer + + sha_state->chunk_length, data, + vLengthToComplete)) + return false; } /*Then we send this buffer to the HWA (no DMA) */ - static_Hash_HwPerform64bDigest( - (u32 *)pSHAState->pChunkBuffer, pSHAState->CTRL, - pSHAState->nBytesProcessed); + tf_digest_hw_perform_64b( + (u32 *)sha_state->chunk_buffer, sha_state->CTRL, + sha_state->bytes_processed); - pSHAState->nBytesProcessed = - 
INREG32(&pSha1Md5Reg_t->DIGEST_COUNT); + sha_state->bytes_processed = + INREG32(&sha1_md5_reg->DIGEST_COUNT); /*We have flushed the chunk so it is empty now */ - pSHAState->nChunkLength = 0; + sha_state->chunk_length = 0; /*Update the data buffer depending of the data already *processed */ - pData += vLengthToComplete; - dataLength -= vLengthToComplete; + data += vLengthToComplete; + data_length -= vLengthToComplete; } - if (dataLength > HASH_BLOCK_BYTES_LENGTH) { + if (data_length > HASH_BLOCK_BYTES_LENGTH) { /*DMA only manages data length that is multiple of 64b */ - u32 vDmaProcessSize = dataLength & 0xFFFFFFC0; + u32 vDmaProcessize = data_length & 0xFFFFFFC0; - if (vDmaProcessSize == dataLength) { + if (vDmaProcessize == data_length) { /*We keep one block for the final */ - vDmaProcessSize -= HASH_BLOCK_BYTES_LENGTH; + vDmaProcessize -= HASH_BLOCK_BYTES_LENGTH; } - static_Hash_HwPerformDmaDigest(pData, vDmaProcessSize, - pSHAState->CTRL, pSHAState->nBytesProcessed); + if (!tf_digest_hw_perform_dma(data, vDmaProcessize, + sha_state->CTRL, sha_state->bytes_processed)) + return false; - pSHAState->nBytesProcessed = - INREG32(&pSha1Md5Reg_t->DIGEST_COUNT); - pData += vDmaProcessSize; - dataLength -= vDmaProcessSize; + sha_state->bytes_processed = + INREG32(&sha1_md5_reg->DIGEST_COUNT); + data += vDmaProcessize; + data_length -= vDmaProcessize; } /*At that point, there is less than 64b left to process*/ - if ((dataLength == 0) || (dataLength > HASH_BLOCK_BYTES_LENGTH)) { + if ((data_length == 0) || (data_length > HASH_BLOCK_BYTES_LENGTH)) /*Should never be in this case !!! 
*/ - panic("PDrvCryptoUpdateHASHWithDMA: \ - Remaining dataLength=%u\n", dataLength); - } + return false; /*We now fill the chunk buffer with the remaining data */ - memcpy(pSHAState->pChunkBuffer, pData, dataLength); - pSHAState->nChunkLength = dataLength; + if (copy_from_user(sha_state->chunk_buffer, data, data_length)) + return false; + sha_state->chunk_length = data_length; + + return true; } #ifdef CONFIG_SMC_KERNEL_CRYPTO -static void PDrvCryptoInitHash(u32 alg, - struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state) +static void tf_digest_init_operation(u32 alg, + struct tf_crypto_sha_operation_state *state) { - memset(state, 0, sizeof(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE)); + memset(state, 0, sizeof(struct tf_crypto_sha_operation_state)); state->CTRL = alg << 1; } @@ -663,7 +685,7 @@ static int static_Hash_HwReadDigest(u32 algo, u8 *out) } for (i = 0; i < regs; i++) { - tmp = INREG32(&pSha1Md5Reg_t->IDIGEST_A + i); + tmp = INREG32(&sha1_md5_reg->IDIGEST_A + i); out[idx++] = (u8) ((tmp >> 0) & 0xff); out[idx++] = (u8) ((tmp >> 8) & 0xff); @@ -674,13 +696,13 @@ static int static_Hash_HwReadDigest(u32 algo, u8 *out) return 0; } -static int PDrvCryptoFinalHash(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state, +static int tf_digest_final(struct tf_crypto_sha_operation_state *state, u8 *out) { - u32 *data = (u32 *) state->pChunkBuffer; + u32 *data = (u32 *) state->chunk_buffer; /* Hashing an empty string? */ - if (state->nBytesProcessed + state->nChunkLength == 0) { + if (state->bytes_processed + state->chunk_length == 0) { switch (DIGEST_MODE_GET_ALGO(state->CTRL)) { case DIGEST_CTRL_ALGO_MD5: memcpy(out, md5OverEmptyString, HASH_MD5_LENGTH); @@ -701,20 +723,20 @@ static int PDrvCryptoFinalHash(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state, return 0; } - PDrvCryptoRestoreHashRegisters(state); + tf_digest_restore_registers(state); /* * At this point, the chunk buffer should contain the last block of data * needed for the final. 
*/ - OUTREG32(&pSha1Md5Reg_t->DIGEST_COUNT, state->nBytesProcessed); - OUTREG32(&pSha1Md5Reg_t->MODE, + OUTREG32(&sha1_md5_reg->DIGEST_COUNT, state->bytes_processed); + OUTREG32(&sha1_md5_reg->MODE, (state->CTRL & 0x6) | 0x10 | - (state->nBytesProcessed == 0) << 3); - OUTREG32(&pSha1Md5Reg_t->LENGTH, state->nChunkLength); + (state->bytes_processed == 0) << 3); + OUTREG32(&sha1_md5_reg->LENGTH, state->chunk_length); - if (SCXPublicCryptoWaitForReadyBit( - (u32 *) &pSha1Md5Reg_t->IRQSTATUS, + if (tf_crypto_wait_for_ready_bit( + (u32 *) &sha1_md5_reg->IRQSTATUS, DIGEST_IRQSTATUS_INPUT_READY_BIT) != PUBLIC_CRYPTO_OPERATION_SUCCESS) { /* Crash the system as this should never occur */ @@ -722,26 +744,26 @@ static int PDrvCryptoFinalHash(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state, "Input data to be ready\n"); } - OUTREG32(&pSha1Md5Reg_t->DIN_0, data[0]); - OUTREG32(&pSha1Md5Reg_t->DIN_1, data[1]); - OUTREG32(&pSha1Md5Reg_t->DIN_2, data[2]); - OUTREG32(&pSha1Md5Reg_t->DIN_3, data[3]); - OUTREG32(&pSha1Md5Reg_t->DIN_4, data[4]); - OUTREG32(&pSha1Md5Reg_t->DIN_5, data[5]); - OUTREG32(&pSha1Md5Reg_t->DIN_6, data[6]); - OUTREG32(&pSha1Md5Reg_t->DIN_7, data[7]); - OUTREG32(&pSha1Md5Reg_t->DIN_8, data[8]); - OUTREG32(&pSha1Md5Reg_t->DIN_9, data[9]); - OUTREG32(&pSha1Md5Reg_t->DIN_10, data[10]); - OUTREG32(&pSha1Md5Reg_t->DIN_11, data[11]); - OUTREG32(&pSha1Md5Reg_t->DIN_12, data[12]); - OUTREG32(&pSha1Md5Reg_t->DIN_13, data[13]); - OUTREG32(&pSha1Md5Reg_t->DIN_14, data[14]); - OUTREG32(&pSha1Md5Reg_t->DIN_15, data[15]); + OUTREG32(&sha1_md5_reg->DIN_0, data[0]); + OUTREG32(&sha1_md5_reg->DIN_1, data[1]); + OUTREG32(&sha1_md5_reg->DIN_2, data[2]); + OUTREG32(&sha1_md5_reg->DIN_3, data[3]); + OUTREG32(&sha1_md5_reg->DIN_4, data[4]); + OUTREG32(&sha1_md5_reg->DIN_5, data[5]); + OUTREG32(&sha1_md5_reg->DIN_6, data[6]); + OUTREG32(&sha1_md5_reg->DIN_7, data[7]); + OUTREG32(&sha1_md5_reg->DIN_8, data[8]); + OUTREG32(&sha1_md5_reg->DIN_9, data[9]); + 
OUTREG32(&sha1_md5_reg->DIN_10, data[10]); + OUTREG32(&sha1_md5_reg->DIN_11, data[11]); + OUTREG32(&sha1_md5_reg->DIN_12, data[12]); + OUTREG32(&sha1_md5_reg->DIN_13, data[13]); + OUTREG32(&sha1_md5_reg->DIN_14, data[14]); + OUTREG32(&sha1_md5_reg->DIN_15, data[15]); /* Wait till the hash operation is finished */ - SCXPublicCryptoWaitForReadyBitInfinitely( - (u32 *) &pSha1Md5Reg_t->IRQSTATUS, + tf_crypto_wait_for_ready_bit_infinitely( + (u32 *) &sha1_md5_reg->IRQSTATUS, DIGEST_IRQSTATUS_OUTPUT_READY_BIT); return static_Hash_HwReadDigest(DIGEST_MODE_GET_ALGO(state->CTRL), out); @@ -754,17 +776,20 @@ static int PDrvCryptoFinalHash(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state, static int digest_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state = shash_desc_ctx(desc); + struct tf_crypto_sha_operation_state *state = shash_desc_ctx(desc); - PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_SHA, LOCK_HWA); + /* Make sure SHA/MD5 HWA is accessible */ + tf_delayed_secure_resume(); - SCXPublicCryptoEnableClock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG); + tf_crypto_lock_hwa(PUBLIC_CRYPTO_HWA_SHA, LOCK_HWA); - PDrvCryptoUpdateHash(state, (u8 *) data, len); + tf_crypto_enable_clock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG); - SCXPublicCryptoDisableClock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG); + tf_digest_update(state, (u8 *) data, len); - PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_SHA, UNLOCK_HWA); + tf_crypto_disable_clock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG); + + tf_crypto_lock_hwa(PUBLIC_CRYPTO_HWA_SHA, UNLOCK_HWA); return 0; } @@ -772,24 +797,27 @@ static int digest_update(struct shash_desc *desc, const u8 *data, static int digest_final(struct shash_desc *desc, u8 *out) { int ret; - struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state = shash_desc_ctx(desc); + struct tf_crypto_sha_operation_state *state = shash_desc_ctx(desc); + + /* Make sure SHA/MD5 HWA is accessible */ + tf_delayed_secure_resume(); - 
PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_SHA, LOCK_HWA); + tf_crypto_lock_hwa(PUBLIC_CRYPTO_HWA_SHA, LOCK_HWA); - SCXPublicCryptoEnableClock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG); + tf_crypto_enable_clock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG); - ret = PDrvCryptoFinalHash(state, out); + ret = tf_digest_final(state, out); - SCXPublicCryptoDisableClock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG); + tf_crypto_disable_clock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG); - PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_SHA, UNLOCK_HWA); + tf_crypto_lock_hwa(PUBLIC_CRYPTO_HWA_SHA, UNLOCK_HWA); return ret; } static int digest_import(struct shash_desc *desc, const void *in) { - struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state = shash_desc_ctx(desc); + struct tf_crypto_sha_operation_state *state = shash_desc_ctx(desc); memcpy(state, in, sizeof(*state)); return 0; @@ -797,7 +825,7 @@ static int digest_import(struct shash_desc *desc, const void *in) static int digest_export(struct shash_desc *desc, void *out) { - struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state = shash_desc_ctx(desc); + struct tf_crypto_sha_operation_state *state = shash_desc_ctx(desc); memcpy(out, state, sizeof(*state)); return 0; @@ -806,9 +834,9 @@ static int digest_export(struct shash_desc *desc, void *out) /* MD5 */ static int md5_init(struct shash_desc *desc) { - struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state = shash_desc_ctx(desc); + struct tf_crypto_sha_operation_state *state = shash_desc_ctx(desc); - PDrvCryptoInitHash(DIGEST_CTRL_ALGO_MD5, state); + tf_digest_init_operation(DIGEST_CTRL_ALGO_MD5, state); return 0; } @@ -820,8 +848,8 @@ static struct shash_alg smc_md5_alg = { .final = digest_final, .export = digest_export, .import = digest_import, - .descsize = sizeof(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE), - .statesize = sizeof(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE), + .descsize = sizeof(struct tf_crypto_sha_operation_state), + .statesize = sizeof(struct tf_crypto_sha_operation_state), .base = { .cra_name = "md5", .cra_driver_name = 
"md5-smc", @@ -835,9 +863,9 @@ static struct shash_alg smc_md5_alg = { /* SHA1 */ static int sha1_init(struct shash_desc *desc) { - struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state = shash_desc_ctx(desc); + struct tf_crypto_sha_operation_state *state = shash_desc_ctx(desc); - PDrvCryptoInitHash(DIGEST_CTRL_ALGO_SHA1, state); + tf_digest_init_operation(DIGEST_CTRL_ALGO_SHA1, state); return 0; } @@ -849,8 +877,8 @@ static struct shash_alg smc_sha1_alg = { .final = digest_final, .export = digest_export, .import = digest_import, - .descsize = sizeof(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE), - .statesize = sizeof(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE), + .descsize = sizeof(struct tf_crypto_sha_operation_state), + .statesize = sizeof(struct tf_crypto_sha_operation_state), .base = { .cra_name = "sha1", .cra_driver_name = "sha1-smc", @@ -864,9 +892,9 @@ static struct shash_alg smc_sha1_alg = { /* SHA224 */ static int sha224_init(struct shash_desc *desc) { - struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state = shash_desc_ctx(desc); + struct tf_crypto_sha_operation_state *state = shash_desc_ctx(desc); - PDrvCryptoInitHash(DIGEST_CTRL_ALGO_SHA224, state); + tf_digest_init_operation(DIGEST_CTRL_ALGO_SHA224, state); return 0; } @@ -878,8 +906,8 @@ static struct shash_alg smc_sha224_alg = { .final = digest_final, .export = digest_export, .import = digest_import, - .descsize = sizeof(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE), - .statesize = sizeof(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE), + .descsize = sizeof(struct tf_crypto_sha_operation_state), + .statesize = sizeof(struct tf_crypto_sha_operation_state), .base = { .cra_name = "sha224", .cra_driver_name = "sha224-smc", @@ -893,9 +921,9 @@ static struct shash_alg smc_sha224_alg = { /* SHA256 */ static int sha256_init(struct shash_desc *desc) { - struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state = shash_desc_ctx(desc); + struct tf_crypto_sha_operation_state *state = shash_desc_ctx(desc); - 
PDrvCryptoInitHash(DIGEST_CTRL_ALGO_SHA256, state); + tf_digest_init_operation(DIGEST_CTRL_ALGO_SHA256, state); return 0; } @@ -907,8 +935,8 @@ static struct shash_alg smc_sha256_alg = { .final = digest_final, .export = digest_export, .import = digest_import, - .descsize = sizeof(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE), - .statesize = sizeof(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE), + .descsize = sizeof(struct tf_crypto_sha_operation_state), + .statesize = sizeof(struct tf_crypto_sha_operation_state), .base = { .cra_name = "sha256", .cra_driver_name = "sha256-smc", diff --git a/security/smc/omap4/scxlnx_defs.h b/security/smc/tf_defs.h index a6dcb9c..23dc7ca 100644 --- a/security/smc/omap4/scxlnx_defs.h +++ b/security/smc/tf_defs.h @@ -1,5 +1,5 @@ -/* - * Copyright (c) 2006-2010 Trusted Logic S.A. +/** + * Copyright (c) 2011 Trusted Logic S.A. * All Rights Reserved. * * This program is free software; you can redistribute it and/or @@ -17,8 +17,8 @@ * MA 02111-1307 USA */ -#ifndef __SCXLNX_DEFS_H__ -#define __SCXLNX_DEFS_H__ +#ifndef __TF_DEFS_H__ +#define __TF_DEFS_H__ #include <asm/atomic.h> #include <linux/version.h> @@ -27,7 +27,6 @@ #include <linux/completion.h> #include <linux/list.h> #include <linux/spinlock.h> -#include <linux/sysdev.h> #include <linux/sysfs.h> #include <linux/sched.h> #include <linux/semaphore.h> @@ -35,7 +34,12 @@ #include <linux/wakelock.h> #endif -#include "scx_protocol.h" +#include "tf_protocol.h" + +#ifdef CONFIG_TF_ION +#include <linux/ion.h> +#include <linux/omap_ion.h> +#endif /*----------------------------------------------------------------------------*/ @@ -44,77 +48,77 @@ /* * Maximum number of shared memory blocks that can be reigsters in a connection */ -#define SCXLNX_SHMEM_MAX_COUNT (64) +#define TF_SHMEM_MAX_COUNT (64) /* * Describes the possible types of shared memories * - * SCXLNX_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM : + * TF_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM : * The descriptor describes a registered shared memory. 
* Its coarse pages are preallocated when initializing the * connection - * SCXLNX_SHMEM_TYPE_REGISTERED_SHMEM : + * TF_SHMEM_TYPE_REGISTERED_SHMEM : * The descriptor describes a registered shared memory. * Its coarse pages are not preallocated - * SCXLNX_SHMEM_TYPE_PM_HIBERNATE : + * TF_SHMEM_TYPE_PM_HIBERNATE : * The descriptor describes a power management shared memory. */ -enum SCXLNX_SHMEM_TYPE { - SCXLNX_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM = 0, - SCXLNX_SHMEM_TYPE_REGISTERED_SHMEM, - SCXLNX_SHMEM_TYPE_PM_HIBERNATE, +enum TF_SHMEM_TYPE { + TF_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM = 0, + TF_SHMEM_TYPE_REGISTERED_SHMEM, + TF_SHMEM_TYPE_PM_HIBERNATE, }; /* * This structure contains a pointer on a coarse page table */ -struct SCXLNX_COARSE_PAGE_TABLE { +struct tf_coarse_page_table { /* * Identifies the coarse page table descriptor in - * sFreeCoarsePageTables list + * free_coarse_page_tables list */ struct list_head list; /* * The address of the coarse page table */ - u32 *pDescriptors; + u32 *descriptors; /* * The address of the array containing this coarse page table */ - struct SCXLNX_COARSE_PAGE_TABLE_ARRAY *pParent; + struct tf_coarse_page_table_array *parent; }; -#define SCXLNX_PAGE_DESCRIPTOR_TYPE_NORMAL 0 -#define SCXLNX_PAGE_DESCRIPTOR_TYPE_PREALLOCATED 1 +#define TF_PAGE_DESCRIPTOR_TYPE_NORMAL 0 +#define TF_PAGE_DESCRIPTOR_TYPE_PREALLOCATED 1 /* * This structure describes an array of up to 4 coarse page tables * allocated within a single 4KB page. 
*/ -struct SCXLNX_COARSE_PAGE_TABLE_ARRAY { +struct tf_coarse_page_table_array { /* - * identifies the element in the sCoarsePageTableArrays list + * identifies the element in the coarse_page_table_arrays list */ struct list_head list; /* * Type of page descriptor - * can take any of SCXLNX_PAGE_DESCRIPTOR_TYPE_XXX value + * can take any of TF_PAGE_DESCRIPTOR_TYPE_XXX value */ - u32 nType; + u32 type; - struct SCXLNX_COARSE_PAGE_TABLE sCoarsePageTables[4]; + struct tf_coarse_page_table coarse_page_tables[4]; /* * A counter of the number of coarse pages currently used * the max value should be 4 (one coarse page table is 1KB while one * page is 4KB) */ - u8 nReferenceCount; + u8 ref_count; }; @@ -124,7 +128,7 @@ struct SCXLNX_COARSE_PAGE_TABLE_ARRAY { * when the driver needs to allocate a new coarse page * table. */ -struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT { +struct tf_coarse_page_table_allocation_context { /* * The spin lock protecting concurrent access to the structure. */ @@ -133,19 +137,19 @@ struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT { /* * The list of allocated coarse page table arrays */ - struct list_head sCoarsePageTableArrays; + struct list_head coarse_page_table_arrays; /* * The list of free coarse page tables */ - struct list_head sFreeCoarsePageTables; + struct list_head free_coarse_page_tables; }; /* * Fully describes a shared memory block */ -struct SCXLNX_SHMEM_DESC { +struct tf_shmem_desc { /* * Identifies the shared memory descriptor in the list of free shared * memory descriptors @@ -155,25 +159,25 @@ struct SCXLNX_SHMEM_DESC { /* * Identifies the type of shared memory descriptor */ - enum SCXLNX_SHMEM_TYPE nType; + enum TF_SHMEM_TYPE type; /* * The identifier of the block of shared memory, as returned by the * Secure World. 
- * This identifier is hBlock field of a REGISTER_SHARED_MEMORY answer + * This identifier is block field of a REGISTER_SHARED_MEMORY answer */ - u32 hIdentifier; + u32 block_identifier; /* Client buffer */ u8 *pBuffer; /* Up to eight coarse page table context */ - struct SCXLNX_COARSE_PAGE_TABLE *pCoarsePageTable[SCX_MAX_COARSE_PAGES]; + struct tf_coarse_page_table *coarse_pg_table[TF_MAX_COARSE_PAGES]; - u32 nNumberOfCoarsePageTables; + u32 coarse_pg_table_count; /* Reference counter */ - atomic_t nRefCnt; + atomic_t ref_count; }; @@ -184,7 +188,7 @@ struct SCXLNX_SHMEM_DESC { * * Note that this driver supports only one instance of the Secure World */ -struct SCXLNX_COMM { +struct tf_comm { /* * The spin lock protecting concurrent access to the structure. */ @@ -192,81 +196,81 @@ struct SCXLNX_COMM { /* * Bit vector with the following possible flags: - * - SCXLNX_COMM_FLAG_IRQ_REQUESTED: If set, indicates that + * - TF_COMM_FLAG_IRQ_REQUESTED: If set, indicates that * the IRQ has been successfuly requested. - * - SCXLNX_COMM_FLAG_TERMINATING: If set, indicates that the + * - TF_COMM_FLAG_TERMINATING: If set, indicates that the * communication with the Secure World is being terminated. * Transmissions to the Secure World are not permitted - * - SCXLNX_COMM_FLAG_W3B_ALLOCATED: If set, indicates that the + * - TF_COMM_FLAG_W3B_ALLOCATED: If set, indicates that the * W3B buffer has been allocated. * * This bit vector must be accessed with the kernel's atomic bitwise * operations. */ - unsigned long nFlags; + unsigned long flags; /* * The virtual address of the L1 shared buffer. */ - struct SCHANNEL_C1S_BUFFER *pBuffer; + struct tf_l1_shared_buffer *pBuffer; /* * The wait queue the client threads are waiting on. */ - wait_queue_head_t waitQueue; + wait_queue_head_t wait_queue; #ifdef CONFIG_TF_TRUSTZONE /* * The interrupt line used by the Secure World. 
*/ - int nSoftIntIrq; + int soft_int_irq; /* ----- W3B ----- */ /* shared memory descriptor to identify the W3B */ - struct SCXLNX_SHMEM_DESC sW3BShmemDesc; + struct tf_shmem_desc w3b_shmem_desc; /* Virtual address of the kernel allocated shared memory */ - u32 nW3BShmemVAddr; + u32 w3b; /* offset of data in shared memory coarse pages */ - u32 nW3BShmemOffset; + u32 w3b_shmem_offset; - u32 nW3BShmemSize; + u32 w3b_shmem_size; - struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT - sW3BAllocationContext; + struct tf_coarse_page_table_allocation_context + w3b_cpt_alloc_context; #endif -#ifdef CONFIG_TF_MSHIELD +#ifdef CONFIG_TF_ZEBRA /* * The SE SDP can only be initialized once... */ - int bSEInitialized; + int se_initialized; /* Virtual address of the L0 communication buffer */ - void *pInitSharedBuffer; + void *init_shared_buffer; /* * Lock to be held by a client when executing an RPC */ - struct mutex sRPCLock; + struct mutex rpc_mutex; /* * Lock to protect concurrent accesses to DMA channels */ - struct mutex sDMALock; + struct mutex dma_mutex; #endif }; -#define SCXLNX_COMM_FLAG_IRQ_REQUESTED (0) -#define SCXLNX_COMM_FLAG_PA_AVAILABLE (1) -#define SCXLNX_COMM_FLAG_TERMINATING (2) -#define SCXLNX_COMM_FLAG_W3B_ALLOCATED (3) -#define SCXLNX_COMM_FLAG_L1_SHARED_ALLOCATED (4) +#define TF_COMM_FLAG_IRQ_REQUESTED (0) +#define TF_COMM_FLAG_PA_AVAILABLE (1) +#define TF_COMM_FLAG_TERMINATING (2) +#define TF_COMM_FLAG_W3B_ALLOCATED (3) +#define TF_COMM_FLAG_L1_SHARED_ALLOCATED (4) /*----------------------------------------------------------------------------*/ -struct SCXLNX_DEVICE_STATS { +struct tf_device_stats { struct kobject kobj; struct kobj_type kobj_type; @@ -283,89 +287,79 @@ struct SCXLNX_DEVICE_STATS { /* * This structure describes the information about one device handled by the * driver. Note that the driver supports only a single device. 
see the global - * variable g_SCXLNXDevice + * variable g_tf_dev + */ -struct SCXLNX_DEVICE { +struct tf_device { /* * The device number for the device. */ - dev_t nDevNum; - - /* - * Interfaces the system device with the kernel. - */ - struct sys_device sysdev; + dev_t dev_number; /* * Interfaces the char device with the kernel. */ struct cdev cdev; -#ifdef CONFIG_TF_MSHIELD +#ifdef CONFIG_TF_TEEC + struct cdev cdev_teec; +#endif + +#ifdef CONFIG_TF_ZEBRA struct cdev cdev_ctrl; /* * Globals for CUS */ /* Current key handles loaded in HWAs */ - u32 hAES1SecureKeyContext; - u32 hAES2SecureKeyContext; - u32 hDESSecureKeyContext; - bool bSHAM1IsPublic; + u32 aes1_key_context; + u32 des_key_context; + bool sham1_is_public; - /* Semaphores used to serialize HWA accesses */ - struct semaphore sAES1CriticalSection; - struct semaphore sAES2CriticalSection; - struct mutex sDESCriticalSection; - struct mutex sSHACriticalSection; + /* Object used to serialize HWA accesses */ + struct semaphore aes1_sema; + struct semaphore des_sema; + struct semaphore sha_sema; /* * An aligned and correctly shaped pre-allocated buffer used for DMA * transfers */ - u32 nDMABufferLength; - u8 *pDMABuffer; - dma_addr_t pDMABufferPhys; + u32 dma_buffer_length; + u8 *dma_buffer; + dma_addr_t dma_buffer_phys; /* Workspace allocated at boot time and reserved to the Secure World */ - u32 nWorkspaceAddr; - u32 nWorkspaceSize; + u32 workspace_addr; + u32 workspace_size; + + /* + * A Mutex to provide exclusive locking of the ioctl() + */ + struct mutex dev_mutex; #endif /* * Communications with the SM. */ - struct SCXLNX_COMM sm; + struct tf_comm sm; /* * Lists the connections attached to this device. A connection is * created each time a user space application "opens" a file descriptor * on the driver */ - struct list_head conns; + struct list_head connection_list; /* * The spin lock used to protect concurrent access to the connection * list. 
*/ - spinlock_t connsLock; - - struct SCXLNX_DEVICE_STATS sDeviceStats; + spinlock_t connection_list_lock; - /* - * A Mutex to provide exlusive locking of the ioctl() - */ - struct mutex dev_mutex; + struct tf_device_stats stats; }; -/* the bits of the nFlags field of the SCXLNX_DEVICE structure */ -#define SCXLNX_DEVICE_FLAG_CDEV_INITIALIZED (0) -#define SCXLNX_DEVICE_FLAG_SYSDEV_CLASS_REGISTERED (1) -#define SCXLNX_DEVICE_FLAG_SYSDEV_REGISTERED (2) -#define SCXLNX_DEVICE_FLAG_CDEV_REGISTERED (3) -#define SCXLNX_DEVICE_FLAG_CDEV_ADDED (4) -#define SCXLNX_DEVICE_SYSFS_REGISTERED (5) - /*----------------------------------------------------------------------------*/ /* * This type describes a connection state. @@ -375,24 +369,24 @@ struct SCXLNX_DEVICE { * Messages may be invalidated between the start of the ioctl call and the * moment the message is sent to the Secure World. * - * SCXLNX_CONN_STATE_NO_DEVICE_CONTEXT : + * TF_CONN_STATE_NO_DEVICE_CONTEXT : * The connection has no DEVICE_CONTEXT created and no * CREATE_DEVICE_CONTEXT being processed by the Secure World - * SCXLNX_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT : + * TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT : * The connection has a CREATE_DEVICE_CONTEXT being processed by the Secure * World - * SCXLNX_CONN_STATE_VALID_DEVICE_CONTEXT : + * TF_CONN_STATE_VALID_DEVICE_CONTEXT : * The connection has a DEVICE_CONTEXT created and no * DESTROY_DEVICE_CONTEXT is being processed by the Secure World - * SCXLNX_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT : + * TF_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT : * The connection has a DESTROY_DEVICE_CONTEXT being processed by the Secure * World */ -enum SCXLNX_CONN_STATE { - SCXLNX_CONN_STATE_NO_DEVICE_CONTEXT = 0, - SCXLNX_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT, - SCXLNX_CONN_STATE_VALID_DEVICE_CONTEXT, - SCXLNX_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT +enum TF_CONN_STATE { + TF_CONN_STATE_NO_DEVICE_CONTEXT = 0, + TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT, + 
TF_CONN_STATE_VALID_DEVICE_CONTEXT, + TF_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT }; @@ -408,10 +402,22 @@ enum SCXLNX_CONN_STATE { * Note that this only covers the case where some other thread * sent a DESTROY_DEVICE_CONTEXT command. */ -enum SCXLNX_COMMAND_STATE { - SCXLNX_COMMAND_STATE_PENDING = 0, - SCXLNX_COMMAND_STATE_SENT, - SCXLNX_COMMAND_STATE_ABORTED +enum TF_COMMAND_STATE { + TF_COMMAND_STATE_PENDING = 0, + TF_COMMAND_STATE_SENT, + TF_COMMAND_STATE_ABORTED +}; + +/* + * The origin of connection parameters such as login data and + * memory reference pointers. + * + * PROCESS: the calling process. All arguments must be validated. + * KERNEL: kernel code. All arguments can be trusted by this driver. + */ +enum TF_CONNECTION_OWNER { + TF_CONNECTION_OWNER_PROCESS = 0, + TF_CONNECTION_OWNER_KERNEL, }; @@ -420,7 +426,7 @@ enum SCXLNX_COMMAND_STATE { * A connection is created each time an application opens a file descriptor on * the driver */ -struct SCXLNX_CONNECTION { +struct tf_connection { /* * Identifies the connection in the list of the connections attached to * the same device. @@ -430,80 +436,88 @@ struct SCXLNX_CONNECTION { /* * State of the connection. */ - enum SCXLNX_CONN_STATE nState; + enum TF_CONN_STATE state; /* * A pointer to the corresponding device structure */ - struct SCXLNX_DEVICE *pDevice; + struct tf_device *dev; /* - * A spinlock to use to access nState + * A spinlock to use to access state */ - spinlock_t stateLock; + spinlock_t state_lock; /* * Counts the number of operations currently pending on the connection. 
* (for debug only) */ - atomic_t nPendingOpCounter; + atomic_t pending_op_count; /* * A handle for the device context */ - u32 hDeviceContext; + u32 device_context; /* * Lists the used shared memory descriptors */ - struct list_head sUsedSharedMemoryList; + struct list_head used_shmem_list; /* * Lists the free shared memory descriptors */ - struct list_head sFreeSharedMemoryList; + struct list_head free_shmem_list; /* * A mutex to use to access this structure */ - struct mutex sharedMemoriesMutex; + struct mutex shmem_mutex; /* * Counts the number of shared memories registered. */ - atomic_t nShmemAllocated; + atomic_t shmem_count; /* * Page to retrieve memory properties when * registering shared memory through REGISTER_SHARED_MEMORY * messages */ - struct vm_area_struct **ppVmas; + struct vm_area_struct **vmas; /* * coarse page table allocation context */ - struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT sAllocationContext; + struct tf_coarse_page_table_allocation_context cpt_alloc_context; + + /* The origin of connection parameters such as login data and + memory reference pointers. */ + enum TF_CONNECTION_OWNER owner; -#ifdef CONFIG_TF_MSHIELD +#ifdef CONFIG_TF_ZEBRA /* Lists all the Cryptoki Update Shortcuts */ - struct list_head ShortcutList; + struct list_head shortcut_list; + + /* Lock to protect concurrent accesses to shortcut_list */ + spinlock_t shortcut_list_lock; +#endif - /* Lock to protect concurrent accesses to ShortcutList */ - spinlock_t shortcutListCriticalSectionLock; +#ifdef CONFIG_TF_ION + struct ion_client *ion_client; #endif }; /*----------------------------------------------------------------------------*/ /* - * The nOperationID field of a message points to this structure. + * The operation_id field of a message points to this structure. 
* It is used to identify the thread that triggered the message transmission * Whoever reads an answer can wake up that thread using the completion event */ -struct SCXLNX_ANSWER_STRUCT { - bool bAnswerCopied; - union SCX_ANSWER_MESSAGE *pAnswer; +struct tf_answer_struct { + bool answer_copied; + union tf_answer *answer; }; /*----------------------------------------------------------------------------*/ @@ -512,16 +526,16 @@ struct SCXLNX_ANSWER_STRUCT { * The ASCII-C string representation of the base name of the devices managed by * this driver. */ -#define SCXLNX_DEVICE_BASE_NAME "tf_driver" +#define TF_DEVICE_BASE_NAME "tf_driver" /** * The major and minor numbers of the registered character device driver. * Only 1 instance of the driver is supported. */ -#define SCXLNX_DEVICE_MINOR_NUMBER (0) +#define TF_DEVICE_MINOR_NUMBER (0) -struct SCXLNX_DEVICE *SCXLNXGetDevice(void); +struct tf_device *tf_get_device(void); #define CLEAN_CACHE_CFG_MASK (~0xC) /* 1111 0011 */ @@ -536,4 +550,4 @@ struct SCXLNX_DEVICE *SCXLNXGetDevice(void); #define GROUP_INFO (current->group_info) #endif -#endif /* !defined(__SCXLNX_DEFS_H__) */ +#endif /* !defined(__TF_DEFS_H__) */ diff --git a/security/smc/tf_device.c b/security/smc/tf_device.c new file mode 100644 index 0000000..7c2c623 --- /dev/null +++ b/security/smc/tf_device.c @@ -0,0 +1,728 @@ +/** + * Copyright (c) 2011 Trusted Logic S.A. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#include <asm/atomic.h> +#include <linux/uaccess.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/mm.h> +#include <linux/page-flags.h> +#include <linux/pm.h> +#include <linux/syscore_ops.h> +#include <linux/vmalloc.h> +#include <linux/signal.h> +#ifdef CONFIG_ANDROID +#include <linux/device.h> +#endif + +#include "tf_protocol.h" +#include "tf_defs.h" +#include "tf_util.h" +#include "tf_conn.h" +#include "tf_comm.h" +#ifdef CONFIG_TF_ZEBRA +#include <plat/cpu.h> +#include "tf_zebra.h" +#endif + +#include "s_version.h" + +#ifdef CONFIG_TF_ION +extern struct ion_device *omap_ion_device; +#endif + +/*---------------------------------------------------------------------------- + * Forward Declarations + *----------------------------------------------------------------------------*/ + +/* + * Creates and registers the device to be managed by the specified driver. + * + * Returns zero upon successful completion, or an appropriate error code upon + * failure. + */ +static int tf_device_register(void); + + +/* + * Implements the device Open callback. + */ +static int tf_device_open( + struct inode *inode, + struct file *file); + + +/* + * Implements the device Release callback. + */ +static int tf_device_release( + struct inode *inode, + struct file *file); + + +/* + * Implements the device ioctl callback. + */ +static long tf_device_ioctl( + struct file *file, + unsigned int ioctl_num, + unsigned long ioctl_param); + + +/* + * Implements the device shutdown callback. + */ +static void tf_device_shutdown(void); + + +/* + * Implements the device suspend callback. + */ +static int tf_device_suspend(void); + + +/* + * Implements the device resume callback. 
+ */ +static void tf_device_resume(void); + + +/*--------------------------------------------------------------------------- + * Module Parameters + *---------------------------------------------------------------------------*/ + +/* + * The device major number used to register a unique character device driver. + * Let the default value be 122 + */ +static int device_major_number = 122; + +module_param(device_major_number, int, 0000); +MODULE_PARM_DESC(device_major_number, + "The device major number used to register a unique character " + "device driver"); + +#ifdef CONFIG_TF_TRUSTZONE +/** + * The softint interrupt line used by the Secure World. + */ +static int soft_interrupt = -1; + +module_param(soft_interrupt, int, 0000); +MODULE_PARM_DESC(soft_interrupt, + "The softint interrupt line used by the Secure world"); +#endif + +#ifdef CONFIG_ANDROID +static struct class *tf_class; +#endif + +/*---------------------------------------------------------------------------- + * Global Variables + *----------------------------------------------------------------------------*/ + +/* + * tf_driver character device definitions. 
+ * read and write methods are not defined + * and will return an error if used by user space + */ +static const struct file_operations g_tf_device_file_ops = { + .owner = THIS_MODULE, + .open = tf_device_open, + .release = tf_device_release, + .unlocked_ioctl = tf_device_ioctl, + .llseek = no_llseek, +}; + +/* The single device supported by this driver */ +static struct tf_device g_tf_dev = {0, }; + +/*---------------------------------------------------------------------------- + * Implementations + *----------------------------------------------------------------------------*/ + +struct tf_device *tf_get_device(void) +{ + return &g_tf_dev; +} + +/* + * displays the driver stats + */ +static ssize_t kobject_show(struct kobject *kobj, + struct attribute *attribute, char *buf) +{ + struct tf_device_stats *dev_stats = &g_tf_dev.stats; + u32 pages_allocated; + u32 pages_locked; + u32 memories_allocated; + + memories_allocated = + atomic_read(&(dev_stats->stat_memories_allocated)); + pages_allocated = + atomic_read(&(dev_stats->stat_pages_allocated)); + pages_locked = atomic_read(&(dev_stats->stat_pages_locked)); + + /* + * AFY: could we add the number of context switches (call to the SMI + * instruction) + */ + + return snprintf(buf, PAGE_SIZE, + "stat.memories.allocated: %d\n" + "stat.pages.allocated: %d\n" + "stat.pages.locked: %d\n", + memories_allocated, + pages_allocated, + pages_locked); +} + +static const struct sysfs_ops kobj_sysfs_operations = { + .show = kobject_show, +}; + +/*----------------------------------------------------------------------------*/ + +static const struct syscore_ops g_tf_syscore_ops = { + .shutdown = tf_device_shutdown, + .suspend = tf_device_suspend, + .resume = tf_device_resume, +}; + +/* + * First routine called when the kernel module is loaded + */ +static int __init tf_device_register(void) +{ + int error; + struct tf_device *dev = &g_tf_dev; + struct tf_device_stats *dev_stats = &dev->stats; + + dprintk(KERN_INFO 
"tf_device_register()\n"); + + /* + * Initialize the device + */ + dev->dev_number = MKDEV(device_major_number, + TF_DEVICE_MINOR_NUMBER); + cdev_init(&dev->cdev, &g_tf_device_file_ops); + dev->cdev.owner = THIS_MODULE; + + INIT_LIST_HEAD(&dev->connection_list); + spin_lock_init(&dev->connection_list_lock); + + /* register the sysfs object driver stats */ + dev_stats->kobj_type.sysfs_ops = &kobj_sysfs_operations; + + dev_stats->kobj_stat_attribute.name = "info"; + dev_stats->kobj_stat_attribute.mode = S_IRUGO; + dev_stats->kobj_attribute_list[0] = + &dev_stats->kobj_stat_attribute; + + dev_stats->kobj_type.default_attrs = + dev_stats->kobj_attribute_list, + error = kobject_init_and_add(&(dev_stats->kobj), + &(dev_stats->kobj_type), NULL, "%s", + TF_DEVICE_BASE_NAME); + if (error) { + kobject_put(&dev_stats->kobj); + goto kobject_init_and_add_failed; + } + + register_syscore_ops((struct syscore_ops *)&g_tf_syscore_ops); + + /* + * Register the char device. + */ + printk(KERN_INFO "Registering char device %s (%u:%u)\n", + TF_DEVICE_BASE_NAME, + MAJOR(dev->dev_number), + MINOR(dev->dev_number)); + error = register_chrdev_region(dev->dev_number, 1, + TF_DEVICE_BASE_NAME); + if (error != 0) { + printk(KERN_ERR "tf_device_register():" + " register_chrdev_region failed (error %d)!\n", + error); + goto register_chrdev_region_failed; + } + + error = cdev_add(&dev->cdev, dev->dev_number, 1); + if (error != 0) { + printk(KERN_ERR "tf_device_register(): " + "cdev_add failed (error %d)!\n", + error); + goto cdev_add_failed; + } + + /* + * Initialize the communication with the Secure World. 
+ */ +#ifdef CONFIG_TF_TRUSTZONE + dev->sm.soft_int_irq = soft_interrupt; +#endif + error = tf_init(&g_tf_dev.sm); + if (error != S_SUCCESS) { + dprintk(KERN_ERR "tf_device_register(): " + "tf_init failed (error %d)!\n", + error); + goto init_failed; + } + +#ifdef CONFIG_ANDROID + tf_class = class_create(THIS_MODULE, TF_DEVICE_BASE_NAME); + device_create(tf_class, NULL, + dev->dev_number, + NULL, TF_DEVICE_BASE_NAME); +#endif + +#ifdef CONFIG_TF_ZEBRA + /* + * Initializes the /dev/tf_ctrl device node. + */ + error = tf_ctrl_device_register(); + if (error) + goto init_failed; +#endif + +#ifdef CONFIG_BENCH_SECURE_CYCLE + run_bogo_mips(); + address_cache_property((unsigned long) &tf_device_register); +#endif + /* + * Successful completion. + */ + + dprintk(KERN_INFO "tf_device_register(): Success\n"); + return 0; + + /* + * Error: undo all operations in the reverse order + */ +init_failed: + cdev_del(&dev->cdev); +cdev_add_failed: + unregister_chrdev_region(dev->dev_number, 1); +register_chrdev_region_failed: + unregister_syscore_ops((struct syscore_ops *)&g_tf_syscore_ops); +kobject_init_and_add_failed: + kobject_del(&g_tf_dev.stats.kobj); + + dprintk(KERN_INFO "tf_device_register(): Failure (error %d)\n", + error); + return error; +} + +/*----------------------------------------------------------------------------*/ + +static int tf_device_open(struct inode *inode, struct file *file) +{ + int error; + struct tf_device *dev = &g_tf_dev; + struct tf_connection *connection = NULL; + + dprintk(KERN_INFO "tf_device_open(%u:%u, %p)\n", + imajor(inode), iminor(inode), file); + + /* Dummy lseek for non-seekable driver */ + error = nonseekable_open(inode, file); + if (error != 0) { + dprintk(KERN_ERR "tf_device_open(%p): " + "nonseekable_open failed (error %d)!\n", + file, error); + goto error; + } + +#ifndef CONFIG_ANDROID + /* + * Check file flags. 
We only autthorize the O_RDWR access + */ + if (file->f_flags != O_RDWR) { + dprintk(KERN_ERR "tf_device_open(%p): " + "Invalid access mode %u\n", + file, file->f_flags); + error = -EACCES; + goto error; + } +#endif + + /* + * Open a new connection. + */ + + error = tf_open(dev, file, &connection); + if (error != 0) { + dprintk(KERN_ERR "tf_device_open(%p): " + "tf_open failed (error %d)!\n", + file, error); + goto error; + } + + file->private_data = connection; + + /* + * Send the CreateDeviceContext command to the secure + */ + error = tf_create_device_context(connection); + if (error != 0) { + dprintk(KERN_ERR "tf_device_open(%p): " + "tf_create_device_context failed (error %d)!\n", + file, error); + goto error1; + } + + /* + * Successful completion. + */ + + dprintk(KERN_INFO "tf_device_open(%p): Success (connection=%p)\n", + file, connection); + return 0; + + /* + * Error handling. + */ + +error1: + tf_close(connection); +error: + dprintk(KERN_INFO "tf_device_open(%p): Failure (error %d)\n", + file, error); + return error; +} + +/*----------------------------------------------------------------------------*/ + +static int tf_device_release(struct inode *inode, struct file *file) +{ + struct tf_connection *connection; + + dprintk(KERN_INFO "tf_device_release(%u:%u, %p)\n", + imajor(inode), iminor(inode), file); + + connection = tf_conn_from_file(file); + tf_close(connection); + + dprintk(KERN_INFO "tf_device_release(%p): Success\n", file); + return 0; +} + +/*----------------------------------------------------------------------------*/ + +static long tf_device_ioctl(struct file *file, unsigned int ioctl_num, + unsigned long ioctl_param) +{ + int result = S_SUCCESS; + struct tf_connection *connection; + union tf_command command; + struct tf_command_header header; + union tf_answer answer; + u32 command_size; + u32 answer_size; + void *user_answer; + + dprintk(KERN_INFO "tf_device_ioctl(%p, %u, %p)\n", + file, ioctl_num, (void *) ioctl_param); + + switch 
(ioctl_num) { + case IOCTL_TF_GET_VERSION: + /* ioctl is asking for the driver interface version */ + result = TF_DRIVER_INTERFACE_VERSION; + goto exit; + +#ifdef CONFIG_TF_ION + case IOCTL_TF_ION_REGISTER: { + int ion_register; + /* ioctl is asking to register an ion handle */ + if (copy_from_user(&ion_register, + (int *) ioctl_param, + sizeof(int))) { + dprintk(KERN_ERR "tf_device_ioctl(%p): " + "copy_from_user failed\n", + file); + result = -EFAULT; + goto exit; + } + + connection = tf_conn_from_file(file); + BUG_ON(connection == NULL); + + /* Initialize ION connection */ + if (connection->ion_client == NULL) { + connection->ion_client = ion_client_create( + omap_ion_device, + (1 << ION_HEAP_TYPE_CARVEOUT), + "smc"); + } + + if (connection->ion_client == NULL) { + dprintk(KERN_ERR "tf_device_ioctl(%p): " + "unable to create ion client\n", + file); + result = -EFAULT; + goto exit; + } + + /* + * TODO: We should use a reference count on this handle in order + * to not unregistered it while using it. 
+ */ + return (long)ion_import_fd(connection->ion_client, ion_register); + } + + case IOCTL_TF_ION_UNREGISTER: { + int ion_register; + /* ioctl is asking to unregister an ion handle */ + + if (copy_from_user(&ion_register, + (int *) ioctl_param, + sizeof(int))) { + dprintk(KERN_ERR "tf_device_ioctl(%p): " + "copy_from_user failed\n", + file); + result = -EFAULT; + goto exit; + } + + connection = tf_conn_from_file(file); + BUG_ON(connection == NULL); + + if (connection->ion_client == NULL) { + dprintk(KERN_ERR "tf_device_ioctl(%p): " + "ion client does not exist\n", + file); + result = -EFAULT; + goto exit; + } + + ion_free(connection->ion_client, + (struct ion_handle *) ion_register); + + return S_SUCCESS; + } +#endif + + case IOCTL_TF_EXCHANGE: + /* + * ioctl is asking to perform a message exchange with the Secure + * Module + */ + + /* + * Make a local copy of the data from the user application + * This routine checks the data is readable + * + * Get the header first. + */ + if (copy_from_user(&header, + (struct tf_command_header *)ioctl_param, + sizeof(struct tf_command_header))) { + dprintk(KERN_ERR "tf_device_ioctl(%p): " + "Cannot access ioctl parameter %p\n", + file, (void *) ioctl_param); + result = -EFAULT; + goto exit; + } + + /* size in words of u32 */ + command_size = header.message_size + + sizeof(struct tf_command_header)/sizeof(u32); + if (command_size > sizeof(command)/sizeof(u32)) { + dprintk(KERN_ERR "tf_device_ioctl(%p): " + "Buffer overflow: too many bytes to copy %d\n", + file, command_size); + result = -EFAULT; + goto exit; + } + + if (copy_from_user(&command, + (union tf_command *)ioctl_param, + command_size * sizeof(u32))) { + dprintk(KERN_ERR "tf_device_ioctl(%p): " + "Cannot access ioctl parameter %p\n", + file, (void *) ioctl_param); + result = -EFAULT; + goto exit; + } + + connection = tf_conn_from_file(file); + BUG_ON(connection == NULL); + + /* + * The answer memory space address is in the operation_id field + */ + user_answer = (void 
*) command.header.operation_id; + + atomic_inc(&(connection->pending_op_count)); + + dprintk(KERN_WARNING "tf_device_ioctl(%p): " + "Sending message type 0x%08x\n", + file, command.header.message_type); + + switch (command.header.message_type) { + case TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION: + result = tf_open_client_session(connection, + &command, &answer); + break; + + case TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION: + result = tf_close_client_session(connection, + &command, &answer); + break; + + case TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY: + result = tf_register_shared_memory(connection, + &command, &answer); + break; + + case TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY: + result = tf_release_shared_memory(connection, + &command, &answer); + break; + + case TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND: + result = tf_invoke_client_command(connection, + &command, &answer); + break; + + case TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND: + result = tf_cancel_client_command(connection, + &command, &answer); + break; + + default: + dprintk(KERN_ERR "tf_device_ioctl(%p): " + "Incorrect message type (0x%08x)!\n", + connection, command.header.message_type); + result = -EOPNOTSUPP; + break; + } + + atomic_dec(&(connection->pending_op_count)); + + if (result != 0) { + dprintk(KERN_WARNING "tf_device_ioctl(%p): " + "Operation returning error code 0x%08x)!\n", + file, result); + goto exit; + } + + /* + * Copy the answer back to the user space application. 
+ * The driver does not check this field, only copy back to user + * space the data handed over by Secure World + */ + answer_size = answer.header.message_size + + sizeof(struct tf_answer_header)/sizeof(u32); + if (copy_to_user(user_answer, + &answer, answer_size * sizeof(u32))) { + dprintk(KERN_WARNING "tf_device_ioctl(%p): " + "Failed to copy back the full command " + "answer to %p\n", file, user_answer); + result = -EFAULT; + goto exit; + } + + /* successful completion */ + dprintk(KERN_INFO "tf_device_ioctl(%p): Success\n", file); + break; + + case IOCTL_TF_GET_DESCRIPTION: { + /* ioctl asking for the version information buffer */ + struct tf_version_information_buffer *pInfoBuffer; + + dprintk(KERN_INFO "IOCTL_TF_GET_DESCRIPTION:(%p, %u, %p)\n", + file, ioctl_num, (void *) ioctl_param); + + pInfoBuffer = + ((struct tf_version_information_buffer *) ioctl_param); + + dprintk(KERN_INFO "IOCTL_TF_GET_DESCRIPTION1: " + "driver_description=\"%64s\"\n", S_VERSION_STRING); + + if (copy_to_user(pInfoBuffer->driver_description, + S_VERSION_STRING, + strlen(S_VERSION_STRING) + 1)) { + dprintk(KERN_ERR "tf_device_ioctl(%p): " + "Fail to copy back the driver description " + "to %p\n", + file, pInfoBuffer->driver_description); + result = -EFAULT; + goto exit; + } + + dprintk(KERN_INFO "IOCTL_TF_GET_DESCRIPTION2: " + "secure_world_description=\"%64s\"\n", + tf_get_description(&g_tf_dev.sm)); + + if (copy_to_user(pInfoBuffer->secure_world_description, + tf_get_description(&g_tf_dev.sm), + TF_DESCRIPTION_BUFFER_LENGTH)) { + dprintk(KERN_WARNING "tf_device_ioctl(%p): " + "Failed to copy back the secure world " + "description to %p\n", + file, pInfoBuffer->secure_world_description); + result = -EFAULT; + goto exit; + } + break; + } + + default: + dprintk(KERN_ERR "tf_device_ioctl(%p): " + "Unknown IOCTL code 0x%08x!\n", + file, ioctl_num); + result = -EOPNOTSUPP; + goto exit; + } + +exit: + return result; +} + 
+/*----------------------------------------------------------------------------*/ + +static void tf_device_shutdown(void) +{ + tf_power_management(&g_tf_dev.sm, TF_POWER_OPERATION_SHUTDOWN); +} + +/*----------------------------------------------------------------------------*/ + +static int tf_device_suspend(void) +{ + dprintk(KERN_INFO "%s\n", __func__); + return tf_power_management(&g_tf_dev.sm, TF_POWER_OPERATION_HIBERNATE); +} + + +/*----------------------------------------------------------------------------*/ + +static void tf_device_resume(void) +{ + tf_power_management(&g_tf_dev.sm, TF_POWER_OPERATION_RESUME); +} + + +/*----------------------------------------------------------------------------*/ + +module_init(tf_device_register); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Trusted Logic S.A."); diff --git a/security/smc/tf_device_mshield.c b/security/smc/tf_device_mshield.c new file mode 100644 index 0000000..17f1451 --- /dev/null +++ b/security/smc/tf_device_mshield.c @@ -0,0 +1,351 @@ +/** + * Copyright (c) 2010 Trusted Logic S.A. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#include <asm/atomic.h> +#include <linux/uaccess.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/mm.h> +#include <linux/page-flags.h> +#include <linux/pm.h> +#include <linux/sysdev.h> +#include <linux/vmalloc.h> +#include <linux/signal.h> +#ifdef CONFIG_ANDROID +#include <linux/device.h> +#endif +#include <linux/init.h> +#include <linux/bootmem.h> + +#include "tf_protocol.h" +#include "tf_defs.h" +#include "tf_util.h" +#include "tf_conn.h" +#include "tf_comm.h" +#include "tf_zebra.h" + +#include "s_version.h" + +#define TF_PA_CTRL_START 0x1 +#define TF_PA_CTRL_STOP 0x2 + +#ifdef CONFIG_ANDROID +static struct class *tf_ctrl_class; +#endif + +#define TF_DEVICE_CTRL_BASE_NAME "tf_ctrl" + +struct tf_pa_ctrl { + u32 nPACommand; + + u32 pa_size; + u8 *pa_buffer; + + u32 conf_size; + u8 *conf_buffer; +}; + +static int tf_ctrl_check_omap_type(void) +{ + /* No need to do anything on a GP device */ + switch (omap_type()) { + case OMAP2_DEVICE_TYPE_GP: + dprintk(KERN_INFO "SMC: Running on a GP device\n"); + return 0; + + case OMAP2_DEVICE_TYPE_EMU: + case OMAP2_DEVICE_TYPE_SEC: + /*case OMAP2_DEVICE_TYPE_TEST:*/ + dprintk(KERN_INFO "SMC: Running on a EMU or HS device\n"); + return 1; + + default: + printk(KERN_ERR "SMC: unknown omap type %x\n", omap_type()); + return -EFAULT; + } +} + +#define IOCTL_TF_PA_CTRL _IOWR('z', 0xFF, struct tf_pa_ctrl) + +static long tf_ctrl_device_ioctl(struct file *file, unsigned int ioctl_num, + unsigned long ioctl_param) +{ + int result = S_SUCCESS; + struct tf_pa_ctrl pa_ctrl; + u8 *pa_buffer = NULL; + u8 *conf_buffer = NULL; + struct tf_device *dev = tf_get_device(); + + dprintk(KERN_INFO "tf_ctrl_device_ioctl(%p, %u, %p)\n", + file, ioctl_num, (void *) ioctl_param); + + 
mutex_lock(&dev->dev_mutex); + + if (ioctl_num != IOCTL_TF_PA_CTRL) { + dprintk(KERN_ERR "tf_ctrl_device_ioctl(%p): " + "ioctl number is invalid (%p)\n", + file, (void *)ioctl_num); + + result = -EFAULT; + goto exit; + } + + if ((ioctl_param & 0x3) != 0) { + dprintk(KERN_ERR "tf_ctrl_device_ioctl(%p): " + "ioctl command message pointer is not word " + "aligned (%p)\n", + file, (void *)ioctl_param); + + result = -EFAULT; + goto exit; + } + + if (copy_from_user(&pa_ctrl, (struct tf_pa_ctrl *)ioctl_param, + sizeof(struct tf_pa_ctrl))) { + dprintk(KERN_ERR "tf_ctrl_device_ioctl(%p): " + "cannot access ioctl parameter (%p)\n", + file, (void *)ioctl_param); + + result = -EFAULT; + goto exit; + } + + switch (pa_ctrl.nPACommand) { + case TF_PA_CTRL_START: + dprintk(KERN_INFO "tf_ctrl_device_ioctl(%p): " + "Start the SMC PA (%d bytes) with conf (%d bytes)\n", + file, pa_ctrl.pa_size, pa_ctrl.conf_size); + + pa_buffer = (u8 *) internal_kmalloc(pa_ctrl.pa_size, + GFP_KERNEL); + if (pa_buffer == NULL) { + dprintk(KERN_ERR "tf_ctrl_device_ioctl(%p): " + "Out of memory for PA buffer\n", file); + + result = -ENOMEM; + goto exit; + } + + if (copy_from_user( + pa_buffer, pa_ctrl.pa_buffer, pa_ctrl.pa_size)) { + dprintk(KERN_ERR "tf_ctrl_device_ioctl(%p): " + "Cannot access PA buffer (%p)\n", + file, (void *) pa_ctrl.pa_buffer); + + internal_kfree(pa_buffer); + + result = -EFAULT; + goto exit; + } + + if (pa_ctrl.conf_size > 0) { + conf_buffer = (u8 *) internal_kmalloc( + pa_ctrl.conf_size, GFP_KERNEL); + if (conf_buffer == NULL) { + internal_kfree(pa_buffer); + + result = -ENOMEM; + goto exit; + } + + if (copy_from_user(conf_buffer, + pa_ctrl.conf_buffer, pa_ctrl.conf_size)) { + internal_kfree(pa_buffer); + internal_kfree(conf_buffer); + + result = -EFAULT; + goto exit; + } + } + + if (dev->workspace_addr == 0) { + result = -ENOMEM; + goto exit; + } + + result = tf_start(&dev->sm, + dev->workspace_addr, + dev->workspace_size, + pa_buffer, + pa_ctrl.pa_size, + conf_buffer, + 
pa_ctrl.conf_size); + if (result) + dprintk(KERN_ERR "SMC: start failed\n"); + else + dprintk(KERN_INFO "SMC: started\n"); + + internal_kfree(pa_buffer); + internal_kfree(conf_buffer); + break; + + case TF_PA_CTRL_STOP: + dprintk(KERN_INFO "tf_ctrl_device_ioctl(%p): " + "Stop the SMC PA\n", file); + + result = tf_power_management(&dev->sm, + TF_POWER_OPERATION_SHUTDOWN); + if (result) + dprintk(KERN_WARNING "SMC: stop failed [0x%x]\n", + result); + else + dprintk(KERN_INFO "SMC: stopped\n"); + break; + + default: + result = -EOPNOTSUPP; + break; + } + +exit: + mutex_unlock(&dev->dev_mutex); + return result; +} + +/*----------------------------------------------------------------------------*/ + +static int tf_ctrl_device_open(struct inode *inode, struct file *file) +{ + int error; + + dprintk(KERN_INFO "tf_ctrl_device_open(%u:%u, %p)\n", + imajor(inode), iminor(inode), file); + + /* Dummy lseek for non-seekable driver */ + error = nonseekable_open(inode, file); + if (error != 0) { + dprintk(KERN_ERR "tf_ctrl_device_open(%p): " + "nonseekable_open failed (error %d)!\n", + file, error); + goto error; + } + +#ifndef CONFIG_ANDROID + /* + * Check file flags. We only autthorize the O_RDWR access + */ + if (file->f_flags != O_RDWR) { + dprintk(KERN_ERR "tf_ctrl_device_open(%p): " + "Invalid access mode %u\n", + file, file->f_flags); + error = -EACCES; + goto error; + } +#endif + + error = tf_ctrl_check_omap_type(); + if (error <= 0) + return error; + + /* + * Successful completion. + */ + + dprintk(KERN_INFO "tf_ctrl_device_open(%p): Success\n", file); + return 0; + + /* + * Error handling. 
+ */ +error: + dprintk(KERN_INFO "tf_ctrl_device_open(%p): Failure (error %d)\n", + file, error); + return error; +} + +static const struct file_operations g_tf_ctrl_device_file_ops = { + .owner = THIS_MODULE, + .open = tf_ctrl_device_open, + .unlocked_ioctl = tf_ctrl_device_ioctl, + .llseek = no_llseek, +}; + +int __init tf_ctrl_device_register(void) +{ + int error; + struct tf_device *dev = tf_get_device(); + + cdev_init(&dev->cdev_ctrl, &g_tf_ctrl_device_file_ops); + dev->cdev_ctrl.owner = THIS_MODULE; + + error = register_chrdev_region(dev->dev_number + 1, 1, + TF_DEVICE_CTRL_BASE_NAME); + if (error) + return error; + + error = cdev_add(&dev->cdev_ctrl, + dev->dev_number + 1, 1); + if (error) { + cdev_del(&(dev->cdev_ctrl)); + unregister_chrdev_region(dev->dev_number + 1, 1); + return error; + } + +#ifdef CONFIG_ANDROID + tf_ctrl_class = class_create(THIS_MODULE, TF_DEVICE_CTRL_BASE_NAME); + device_create(tf_ctrl_class, NULL, + dev->dev_number + 1, + NULL, TF_DEVICE_CTRL_BASE_NAME); +#endif + + mutex_init(&dev->dev_mutex); + + return error; +} + +static int __initdata smc_mem; +static int __initdata smc_address; + +void __init tf_allocate_workspace(void) +{ + struct tf_device *dev = tf_get_device(); + + if (tf_ctrl_check_omap_type() <= 0) + return; + + dev->workspace_size = smc_mem; + if (dev->workspace_size < 3*SZ_1M) + dev->workspace_size = 3*SZ_1M; + + if (smc_address == 0) +#if 0 + dev->workspace_addr = (u32) __pa(__alloc_bootmem( + dev->workspace_size, SZ_1M, __pa(MAX_DMA_ADDRESS))); +#else + dev->workspace_addr = (u32) 0xBFD00000; +#endif + else + dev->workspace_addr = smc_address; + + pr_info("SMC: Allocated workspace of 0x%x Bytes at (0x%x)\n", + dev->workspace_size, + dev->workspace_addr); +} + +static int __init tf_mem_setup(char *str) +{ + smc_mem = memparse(str, &str); + if (*str == '@') { + str += 1; + get_option(&str, &smc_address); + } + return 0; +} + +early_param("smc_mem", tf_mem_setup); diff --git a/security/smc/tf_dma.c 
b/security/smc/tf_dma.c new file mode 100644 index 0000000..a424dbb --- /dev/null +++ b/security/smc/tf_dma.c @@ -0,0 +1,106 @@ +/** + * Copyright (c) 2011 Trusted Logic S.A. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#include "tf_defs.h" +#include "tf_util.h" +#include "tf_dma.h" + +#include <asm/atomic.h> + +static atomic_t g_dmaEventFlag = ATOMIC_INIT(0); + +/*------------------------------------------------------------------------ */ +/* + * Internal functions + */ + +static void tf_dma_callback(int lch, u16 ch_status, void *data) +{ + atomic_inc(&g_dmaEventFlag); +} + +/*------------------------------------------------------------------------ */ +/* + * Public DMA API + */ + +u32 tf_dma_request(int *lch) +{ + int dma_ch_out = 0; + + if (lch == NULL) + return PUBLIC_CRYPTO_ERR_BAD_PARAMETERS; + + if (omap_request_dma(0, "SMC Public Crypto", + tf_dma_callback, NULL, &dma_ch_out) != 0) + return PUBLIC_CRYPTO_ERR_OUT_OF_MEMORY; + + omap_disable_dma_irq(dma_ch_out, OMAP_DMA_DROP_IRQ | + OMAP_DMA_BLOCK_IRQ); + + *lch = dma_ch_out; + + return PUBLIC_CRYPTO_OPERATION_SUCCESS; +} + +/*------------------------------------------------------------------------ */ + +void tf_dma_start(int lch, int interrupt_mask) +{ + atomic_set(&g_dmaEventFlag, 0); + omap_enable_dma_irq(lch, interrupt_mask); + omap_start_dma(lch); +} + 
+/*------------------------------------------------------------------------ */ + +void tf_dma_wait(int nr_of_cb) +{ + while (atomic_read(&g_dmaEventFlag) < nr_of_cb) + cpu_relax(); +} + +/*------------------------------------------------------------------------ */ +/* + * Perform common DMA channel setup, used to factorize the code + * + * Output: struct omap_dma_channel_params *dma_channel + * Inputs: u32 nb_blocks Number of block of the transfer + * u32 nb_elements Number of elements of the transfer + * u32 dst_start Destination address + * u32 src_start Source address + * u32 trigger_id Trigger ID + */ +void tf_dma_set_channel_common_params( + struct omap_dma_channel_params *dma_channel, + u32 nb_blocks, u32 nb_elements, + u32 dst_start, u32 src_start, u32 trigger_id) +{ + dma_channel->data_type = OMAP_DMA_DATA_TYPE_S32; + dma_channel->elem_count = nb_elements; + dma_channel->frame_count = nb_blocks; + dma_channel->src_ei = 0; + dma_channel->src_fi = 0; + dma_channel->dst_ei = 0; + dma_channel->dst_fi = 0; + dma_channel->sync_mode = OMAP_DMA_SYNC_FRAME; + dma_channel->src_start = src_start; + dma_channel->dst_start = dst_start; + dma_channel->trigger = trigger_id; +} diff --git a/security/smc/tf_dma.h b/security/smc/tf_dma.h new file mode 100644 index 0000000..3492241 --- /dev/null +++ b/security/smc/tf_dma.h @@ -0,0 +1,64 @@ +/** + * Copyright (c) 2011 Trusted Logic S.A. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#ifndef __TF_PUBLIC_DMA_H +#define __TF_PUBLIC_DMA_H + +#include <linux/dma-mapping.h> +#include <plat/dma.h> +#include <plat/dma-44xx.h> + +#include "tf_crypto.h" + +/*-------------------------------------------------------------------------- */ +/* + * Public DMA API + */ + +/* + * CEN Masks + */ +#define DMA_CEN_Elts_per_Frame_AES 4 +#define DMA_CEN_Elts_per_Frame_DES 2 +#define DMA_CEN_Elts_per_Frame_SHA 16 + +/* + * Request a DMA channel + */ +u32 tf_dma_request(int *lch); + +/** + * This function waits for the DMA IRQ. + */ +void tf_dma_wait(int nr_of_cb); + +/* + * This function starts a DMA operation. + * + * lch DMA channel ID. + * interrupt_mask Configures the Channel Interrupt Control Register. + */ +void tf_dma_start(int lch, int interrupt_mask); + +void tf_dma_set_channel_common_params( + struct omap_dma_channel_params *dma_channel, + u32 nb_blocks, u32 nb_elements, u32 dst_start, + u32 src_start, u32 trigger_id); + +#endif /*__TF_PUBLIC_DMA_H */ diff --git a/security/smc/tf_protocol.h b/security/smc/tf_protocol.h new file mode 100644 index 0000000..e3e6485 --- /dev/null +++ b/security/smc/tf_protocol.h @@ -0,0 +1,674 @@ +/** + * Copyright (c) 2011 Trusted Logic S.A. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#ifndef __TF_PROTOCOL_H__ +#define __TF_PROTOCOL_H__ + +/*---------------------------------------------------------------------------- + * + * This header file defines the structure used in the SChannel Protocol. + * See your Product Reference Manual for a specification of the SChannel + * protocol. + *---------------------------------------------------------------------------*/ + +/* + * The driver interface version returned by the version ioctl + */ +#define TF_DRIVER_INTERFACE_VERSION 0x04000000 + +/* + * Protocol version handling + */ +#define TF_S_PROTOCOL_MAJOR_VERSION (0x06) +#define GET_PROTOCOL_MAJOR_VERSION(a) (a >> 24) +#define GET_PROTOCOL_MINOR_VERSION(a) ((a >> 16) & 0xFF) + +/* + * The S flag of the config_flag_s register. + */ +#define TF_CONFIG_FLAG_S (1 << 3) + +/* + * The TimeSlot field of the sync_serial_n register. + */ +#define TF_SYNC_SERIAL_TIMESLOT_N (1) + +/* + * status_s related defines. + */ +#define TF_STATUS_P_MASK (0X00000001) +#define TF_STATUS_POWER_STATE_SHIFT (3) +#define TF_STATUS_POWER_STATE_MASK (0x1F << TF_STATUS_POWER_STATE_SHIFT) + +/* + * Possible power states of the POWER_STATE field of the status_s register + */ +#define TF_POWER_MODE_COLD_BOOT (0) +#define TF_POWER_MODE_WARM_BOOT (1) +#define TF_POWER_MODE_ACTIVE (3) +#define TF_POWER_MODE_READY_TO_SHUTDOWN (5) +#define TF_POWER_MODE_READY_TO_HIBERNATE (7) +#define TF_POWER_MODE_WAKEUP (8) +#define TF_POWER_MODE_PANIC (15) + +/* + * Possible command values for MANAGEMENT commands + */ +#define TF_MANAGEMENT_HIBERNATE (1) +#define TF_MANAGEMENT_SHUTDOWN (2) +#define TF_MANAGEMENT_PREPARE_FOR_CORE_OFF (3) +#define TF_MANAGEMENT_RESUME_FROM_CORE_OFF (4) + +/* + * The capacity of the Normal Word message queue, in number of slots. 
+ */ +#define TF_N_MESSAGE_QUEUE_CAPACITY (512) + +/* + * The capacity of the Secure World message answer queue, in number of slots. + */ +#define TF_S_ANSWER_QUEUE_CAPACITY (256) + +/* + * The value of the S-timeout register indicating an infinite timeout. + */ +#define TF_S_TIMEOUT_0_INFINITE (0xFFFFFFFF) +#define TF_S_TIMEOUT_1_INFINITE (0xFFFFFFFF) + +/* + * The value of the S-timeout register indicating an immediate timeout. + */ +#define TF_S_TIMEOUT_0_IMMEDIATE (0x0) +#define TF_S_TIMEOUT_1_IMMEDIATE (0x0) + +/* + * Identifies the get protocol version SMC. + */ +#define TF_SMC_GET_PROTOCOL_VERSION (0XFFFFFFFB) + +/* + * Identifies the init SMC. + */ +#define TF_SMC_INIT (0XFFFFFFFF) + +/* + * Identifies the reset irq SMC. + */ +#define TF_SMC_RESET_IRQ (0xFFFFFFFE) + +/* + * Identifies the SET_W3B SMC. + */ +#define TF_SMC_WAKE_UP (0xFFFFFFFD) + +/* + * Identifies the STOP SMC. + */ +#define TF_SMC_STOP (0xFFFFFFFC) + +/* + * Identifies the n-yield SMC. + */ +#define TF_SMC_N_YIELD (0X00000003) + + +/* Possible stop commands for SMC_STOP */ +#define SCSTOP_HIBERNATE (0xFFFFFFE1) +#define SCSTOP_SHUTDOWN (0xFFFFFFE2) + +/* + * representation of an UUID. + */ +struct tf_uuid { + u32 time_low; + u16 time_mid; + u16 time_hi_and_version; + u8 clock_seq_and_node[8]; +}; + + +/** + * Command parameters. + */ +struct tf_command_param_value { + u32 a; + u32 b; +}; + +struct tf_command_param_temp_memref { + u32 descriptor; /* data pointer for exchange message.*/ + u32 size; + u32 offset; +}; + +struct tf_command_param_memref { + u32 block; + u32 size; + u32 offset; +}; + +union tf_command_param { + struct tf_command_param_value value; + struct tf_command_param_temp_memref temp_memref; + struct tf_command_param_memref memref; +}; + +/** + * Answer parameters. 
+ */ +struct tf_answer_param_value { + u32 a; + u32 b; +}; + +struct tf_answer_param_size { + u32 _ignored; + u32 size; +}; + +union tf_answer_param { + struct tf_answer_param_size size; + struct tf_answer_param_value value; +}; + +/* + * Descriptor tables capacity + */ +#define TF_MAX_W3B_COARSE_PAGES (2) +#define TF_MAX_COARSE_PAGES (8) +#define TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT (8) +#define TF_DESCRIPTOR_TABLE_CAPACITY \ + (1 << TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT) +#define TF_DESCRIPTOR_TABLE_CAPACITY_MASK \ + (TF_DESCRIPTOR_TABLE_CAPACITY - 1) +/* Shared memories coarse pages can map up to 1MB */ +#define TF_MAX_COARSE_PAGE_MAPPED_SIZE \ + (PAGE_SIZE * TF_DESCRIPTOR_TABLE_CAPACITY) +/* Shared memories cannot exceed 8MB */ +#define TF_MAX_SHMEM_SIZE \ + (TF_MAX_COARSE_PAGE_MAPPED_SIZE << 3) + +/* + * Buffer size for version description fields + */ +#define TF_DESCRIPTION_BUFFER_LENGTH 64 + +/* + * Shared memory type flags. + */ +#define TF_SHMEM_TYPE_READ (0x00000001) +#define TF_SHMEM_TYPE_WRITE (0x00000002) + +/* + * Shared mem flags + */ +#define TF_SHARED_MEM_FLAG_INPUT 1 +#define TF_SHARED_MEM_FLAG_OUTPUT 2 +#define TF_SHARED_MEM_FLAG_INOUT 3 + + +/* + * Parameter types + */ +#define TF_PARAM_TYPE_NONE 0x0 +#define TF_PARAM_TYPE_VALUE_INPUT 0x1 +#define TF_PARAM_TYPE_VALUE_OUTPUT 0x2 +#define TF_PARAM_TYPE_VALUE_INOUT 0x3 +#define TF_PARAM_TYPE_MEMREF_TEMP_INPUT 0x5 +#define TF_PARAM_TYPE_MEMREF_TEMP_OUTPUT 0x6 +#define TF_PARAM_TYPE_MEMREF_TEMP_INOUT 0x7 +#define TF_PARAM_TYPE_MEMREF_ION_HANDLE 0xB +#define TF_PARAM_TYPE_MEMREF_INPUT 0xD +#define TF_PARAM_TYPE_MEMREF_OUTPUT 0xE +#define TF_PARAM_TYPE_MEMREF_INOUT 0xF + +#define TF_PARAM_TYPE_MEMREF_FLAG 0x4 +#define TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG 0x8 + + +#define TF_MAKE_PARAM_TYPES(t0, t1, t2, t3) \ + ((t0) | ((t1) << 4) | ((t2) << 8) | ((t3) << 12)) +#define TF_GET_PARAM_TYPE(t, i) (((t) >> (4 * i)) & 0xF) + +/* + * Login types. 
+ */ +#define TF_LOGIN_PUBLIC 0x00000000 +#define TF_LOGIN_USER 0x00000001 +#define TF_LOGIN_GROUP 0x00000002 +#define TF_LOGIN_APPLICATION 0x00000004 +#define TF_LOGIN_APPLICATION_USER 0x00000005 +#define TF_LOGIN_APPLICATION_GROUP 0x00000006 +#define TF_LOGIN_AUTHENTICATION 0x80000000 +#define TF_LOGIN_PRIVILEGED 0x80000002 + +/* Login variants */ + +#define TF_LOGIN_VARIANT(main_type, os, variant) \ + ((main_type) | (1 << 27) | ((os) << 16) | ((variant) << 8)) + +#define TF_LOGIN_GET_MAIN_TYPE(type) \ + ((type) & ~TF_LOGIN_VARIANT(0, 0xFF, 0xFF)) + +#define TF_LOGIN_OS_ANY 0x00 +#define TF_LOGIN_OS_LINUX 0x01 +#define TF_LOGIN_OS_ANDROID 0x04 + +/* OS-independent variants */ +#define TF_LOGIN_USER_NONE \ + TF_LOGIN_VARIANT(TF_LOGIN_USER, TF_LOGIN_OS_ANY, 0xFF) +#define TF_LOGIN_GROUP_NONE \ + TF_LOGIN_VARIANT(TF_LOGIN_GROUP, TF_LOGIN_OS_ANY, 0xFF) +#define TF_LOGIN_APPLICATION_USER_NONE \ + TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_USER, TF_LOGIN_OS_ANY, 0xFF) +#define TF_LOGIN_AUTHENTICATION_BINARY_SHA1_HASH \ + TF_LOGIN_VARIANT(TF_LOGIN_AUTHENTICATION, TF_LOGIN_OS_ANY, 0x01) +#define TF_LOGIN_PRIVILEGED_KERNEL \ + TF_LOGIN_VARIANT(TF_LOGIN_PRIVILEGED, TF_LOGIN_OS_ANY, 0x01) + +/* Linux variants */ +#define TF_LOGIN_USER_LINUX_EUID \ + TF_LOGIN_VARIANT(TF_LOGIN_USER, TF_LOGIN_OS_LINUX, 0x01) +#define TF_LOGIN_GROUP_LINUX_GID \ + TF_LOGIN_VARIANT(TF_LOGIN_GROUP, TF_LOGIN_OS_LINUX, 0x01) +#define TF_LOGIN_APPLICATION_LINUX_PATH_SHA1_HASH \ + TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION, TF_LOGIN_OS_LINUX, 0x01) +#define TF_LOGIN_APPLICATION_USER_LINUX_PATH_EUID_SHA1_HASH \ + TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_USER, TF_LOGIN_OS_LINUX, 0x01) +#define TF_LOGIN_APPLICATION_GROUP_LINUX_PATH_GID_SHA1_HASH \ + TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_GROUP, TF_LOGIN_OS_LINUX, 0x01) + +/* Android variants */ +#define TF_LOGIN_USER_ANDROID_EUID \ + TF_LOGIN_VARIANT(TF_LOGIN_USER, TF_LOGIN_OS_ANDROID, 0x01) +#define TF_LOGIN_GROUP_ANDROID_GID \ + TF_LOGIN_VARIANT(TF_LOGIN_GROUP, 
TF_LOGIN_OS_ANDROID, 0x01) +#define TF_LOGIN_APPLICATION_ANDROID_UID \ + TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION, TF_LOGIN_OS_ANDROID, 0x01) +#define TF_LOGIN_APPLICATION_USER_ANDROID_UID_EUID \ + TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_USER, TF_LOGIN_OS_ANDROID, \ + 0x01) +#define TF_LOGIN_APPLICATION_GROUP_ANDROID_UID_GID \ + TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_GROUP, TF_LOGIN_OS_ANDROID, \ + 0x01) + +/* + * return origins + */ +#define TF_ORIGIN_COMMS 2 +#define TF_ORIGIN_TEE 3 +#define TF_ORIGIN_TRUSTED_APP 4 +/* + * The message types. + */ +#define TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT 0x02 +#define TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT 0xFD +#define TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY 0xF7 +#define TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY 0xF9 +#define TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION 0xF0 +#define TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION 0xF2 +#define TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND 0xF5 +#define TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND 0xF4 +#define TF_MESSAGE_TYPE_MANAGEMENT 0xFE + + +/* + * The SChannel error codes. + */ +#define S_SUCCESS 0x00000000 +#define S_ERROR_OUT_OF_MEMORY 0xFFFF000C + + +struct tf_command_header { + u8 message_size; + u8 message_type; + u16 message_info; + u32 operation_id; +}; + +struct tf_answer_header { + u8 message_size; + u8 message_type; + u16 message_info; + u32 operation_id; + u32 error_code; +}; + +/* + * CREATE_DEVICE_CONTEXT command message. + */ +struct tf_command_create_device_context { + u8 message_size; + u8 message_type; + u16 message_info_rfu; + u32 operation_id; + u32 device_context_id; +}; + +/* + * CREATE_DEVICE_CONTEXT answer message. + */ +struct tf_answer_create_device_context { + u8 message_size; + u8 message_type; + u16 message_info_rfu; + /* an opaque Normal World identifier for the operation */ + u32 operation_id; + u32 error_code; + /* an opaque Normal World identifier for the device context */ + u32 device_context; +}; + +/* + * DESTROY_DEVICE_CONTEXT command message. 
+ */ +struct tf_command_destroy_device_context { + u8 message_size; + u8 message_type; + u16 message_info_rfu; + u32 operation_id; + u32 device_context; +}; + +/* + * DESTROY_DEVICE_CONTEXT answer message. + */ +struct tf_answer_destroy_device_context { + u8 message_size; + u8 message_type; + u16 message_info_rfu; + /* an opaque Normal World identifier for the operation */ + u32 operation_id; + u32 error_code; + u32 device_context_id; +}; + +/* + * OPEN_CLIENT_SESSION command message. + */ +struct tf_command_open_client_session { + u8 message_size; + u8 message_type; + u16 param_types; + /* an opaque Normal World identifier for the operation */ + u32 operation_id; + u32 device_context; + u32 cancellation_id; + u64 timeout; + struct tf_uuid destination_uuid; + union tf_command_param params[4]; + u32 login_type; + /* + * Size = 0 for public, [16] for group identification, [20] for + * authentication + */ + u8 login_data[20]; +}; + +/* + * OPEN_CLIENT_SESSION answer message. + */ +struct tf_answer_open_client_session { + u8 message_size; + u8 message_type; + u8 error_origin; + u8 __reserved; + /* an opaque Normal World identifier for the operation */ + u32 operation_id; + u32 error_code; + u32 client_session; + union tf_answer_param answers[4]; +}; + +/* + * CLOSE_CLIENT_SESSION command message. + */ +struct tf_command_close_client_session { + u8 message_size; + u8 message_type; + u16 message_info_rfu; + /* an opaque Normal World identifier for the operation */ + u32 operation_id; + u32 device_context; + u32 client_session; +}; + +/* + * CLOSE_CLIENT_SESSION answer message. 
+ */ +struct tf_answer_close_client_session { + u8 message_size; + u8 message_type; + u16 message_info_rfu; + /* an opaque Normal World identifier for the operation */ + u32 operation_id; + u32 error_code; +}; + + +/* + * REGISTER_SHARED_MEMORY command message + */ +struct tf_command_register_shared_memory { + u8 message_size; + u8 message_type; + u16 memory_flags; + u32 operation_id; + u32 device_context; + u32 block_id; + u32 shared_mem_size; + u32 shared_mem_start_offset; + u32 shared_mem_descriptors[TF_MAX_COARSE_PAGES]; +}; + +/* + * REGISTER_SHARED_MEMORY answer message. + */ +struct tf_answer_register_shared_memory { + u8 message_size; + u8 message_type; + u16 message_info_rfu; + /* an opaque Normal World identifier for the operation */ + u32 operation_id; + u32 error_code; + u32 block; +}; + +/* + * RELEASE_SHARED_MEMORY command message. + */ +struct tf_command_release_shared_memory { + u8 message_size; + u8 message_type; + u16 message_info_rfu; + /* an opaque Normal World identifier for the operation */ + u32 operation_id; + u32 device_context; + u32 block; +}; + +/* + * RELEASE_SHARED_MEMORY answer message. + */ +struct tf_answer_release_shared_memory { + u8 message_size; + u8 message_type; + u16 message_info_rfu; + u32 operation_id; + u32 error_code; + u32 block_id; +}; + +/* + * INVOKE_CLIENT_COMMAND command message. + */ +struct tf_command_invoke_client_command { + u8 message_size; + u8 message_type; + u16 param_types; + u32 operation_id; + u32 device_context; + u32 client_session; + u64 timeout; + u32 cancellation_id; + u32 client_command_identifier; + union tf_command_param params[4]; +}; + +/* + * INVOKE_CLIENT_COMMAND command answer. + */ +struct tf_answer_invoke_client_command { + u8 message_size; + u8 message_type; + u8 error_origin; + u8 __reserved; + u32 operation_id; + u32 error_code; + union tf_answer_param answers[4]; +}; + +/* + * CANCEL_CLIENT_OPERATION command message. 
+ */ +struct tf_command_cancel_client_operation { + u8 message_size; + u8 message_type; + u16 message_info_rfu; + /* an opaque Normal World identifier for the operation */ + u32 operation_id; + u32 device_context; + u32 client_session; + u32 cancellation_id; +}; + +struct tf_answer_cancel_client_operation { + u8 message_size; + u8 message_type; + u16 message_info_rfu; + u32 operation_id; + u32 error_code; +}; + +/* + * MANAGEMENT command message. + */ +struct tf_command_management { + u8 message_size; + u8 message_type; + u16 command; + u32 operation_id; + u32 w3b_size; + u32 w3b_start_offset; + u32 shared_mem_descriptors[1]; +}; + +/* + * POWER_MANAGEMENT answer message. + * The message does not provide message specific parameters. + * Therefore no need to define a specific answer structure + */ + +/* + * Structure for L2 messages + */ +union tf_command { + struct tf_command_header header; + struct tf_command_create_device_context create_device_context; + struct tf_command_destroy_device_context destroy_device_context; + struct tf_command_open_client_session open_client_session; + struct tf_command_close_client_session close_client_session; + struct tf_command_register_shared_memory register_shared_memory; + struct tf_command_release_shared_memory release_shared_memory; + struct tf_command_invoke_client_command invoke_client_command; + struct tf_command_cancel_client_operation cancel_client_operation; + struct tf_command_management management; +}; + +/* + * Structure for any L2 answer + */ + +union tf_answer { + struct tf_answer_header header; + struct tf_answer_create_device_context create_device_context; + struct tf_answer_open_client_session open_client_session; + struct tf_answer_close_client_session close_client_session; + struct tf_answer_register_shared_memory register_shared_memory; + struct tf_answer_release_shared_memory release_shared_memory; + struct tf_answer_invoke_client_command invoke_client_command; + struct tf_answer_destroy_device_context 
destroy_device_context; + struct tf_answer_cancel_client_operation cancel_client_operation; +}; + +/* Structure of the Communication Buffer */ +struct tf_l1_shared_buffer { + u32 config_flag_s; + u32 w3b_size_max_s; + u32 reserved0; + u32 w3b_size_current_s; + u8 reserved1[48]; + u8 version_description[TF_DESCRIPTION_BUFFER_LENGTH]; + u32 status_s; + u32 reserved2; + u32 sync_serial_n; + u32 sync_serial_s; + u64 time_n[2]; + u64 timeout_s[2]; + u32 first_command; + u32 first_free_command; + u32 first_answer; + u32 first_free_answer; + u32 w3b_descriptors[128]; + #ifdef CONFIG_TF_ZEBRA + u8 rpc_trace_buffer[140]; + u8 rpc_cus_buffer[180]; + #else + u8 reserved3[320]; + #endif + u32 command_queue[TF_N_MESSAGE_QUEUE_CAPACITY]; + u32 answer_queue[TF_S_ANSWER_QUEUE_CAPACITY]; +}; + + +/* + * tf_version_information_buffer structure description + * Description of the sVersionBuffer handed over from user space to kernel space + * This field is filled by the driver during a CREATE_DEVICE_CONTEXT ioctl + * and handed back to user space + */ +struct tf_version_information_buffer { + u8 driver_description[65]; + u8 secure_world_description[65]; +}; + + +/* The IOCTLs the driver supports */ +#include <linux/ioctl.h> + +#define IOCTL_TF_GET_VERSION _IO('z', 0) +#define IOCTL_TF_EXCHANGE _IOWR('z', 1, union tf_command) +#define IOCTL_TF_GET_DESCRIPTION _IOR('z', 2, \ + struct tf_version_information_buffer) +#ifdef CONFIG_TF_ION +#define IOCTL_TF_ION_REGISTER _IOR('z', 254, int) +#define IOCTL_TF_ION_UNREGISTER _IOR('z', 255, int) +#endif + +#endif /* !defined(__TF_PROTOCOL_H__) */ diff --git a/security/smc/tf_teec.c b/security/smc/tf_teec.c new file mode 100644 index 0000000..6e6d5b2 --- /dev/null +++ b/security/smc/tf_teec.c @@ -0,0 +1,624 @@ +/** + * Copyright (c) 2011 Trusted Logic S.A. + * All Rights Reserved. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#ifdef CONFIG_TF_TEEC + +#include <asm/atomic.h> +#include <asm/system.h> +#include <linux/uaccess.h> +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/list.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/pagemap.h> +#include <linux/time.h> +#include <linux/types.h> +#include <linux/vmalloc.h> + +#include "tf_protocol.h" +#include "tf_defs.h" +#include "tf_util.h" +#include "tf_comm.h" +#include "tf_conn.h" +#include "tf_teec.h" + +#include "tee_client_api.h" + +#define TF_COMMAND_BYTES(cmd) \ + (sizeof(cmd) - sizeof(struct tf_command_header)) +#define TF_COMMAND_SIZE(cmd) \ + (TF_COMMAND_BYTES(cmd) / sizeof(u32)) + +/* Associate TEEC errors to POSIX/Linux errors. The matching is somewhat + arbitrary but one-to-one for supported error codes. 
*/ +int TEEC_decode_error(TEEC_Result ret) +{ + switch (ret) + { + case TEEC_SUCCESS: return 0; + case TEEC_ERROR_GENERIC: return -EIO; + case TEEC_ERROR_ACCESS_DENIED: return -EPERM; + case TEEC_ERROR_CANCEL: return -ECANCELED; + case TEEC_ERROR_ACCESS_CONFLICT: return -EBUSY; + case TEEC_ERROR_EXCESS_DATA: return -E2BIG; + case TEEC_ERROR_BAD_FORMAT: return -EDOM; + case TEEC_ERROR_BAD_PARAMETERS: return -EINVAL; + case TEEC_ERROR_BAD_STATE: return -EBADFD; + case TEEC_ERROR_ITEM_NOT_FOUND: return -ENOENT; + case TEEC_ERROR_NOT_IMPLEMENTED: return -EPROTONOSUPPORT; + case TEEC_ERROR_NOT_SUPPORTED: return -ENOSYS; + case TEEC_ERROR_NO_DATA: return -ENODATA; + case TEEC_ERROR_OUT_OF_MEMORY: return -ENOMEM; + case TEEC_ERROR_BUSY: return -EAGAIN; + case TEEC_ERROR_COMMUNICATION: return -EPIPE; + case TEEC_ERROR_SECURITY: return -ECONNABORTED; + case TEEC_ERROR_SHORT_BUFFER: return -EFBIG; + default: return -EIO; + } +} + +/* Associate POSIX/Linux errors to TEEC errors. The matching is somewhat + arbitrary, but TEEC_encode_error(TEEC_decode_error(x))==x for supported + error codes. 
*/ +TEEC_Result TEEC_encode_error(int err) +{ + if (err >= 0) { + return S_SUCCESS; + } + switch (err) { + case 0: return TEEC_SUCCESS; + case -EIO: return TEEC_ERROR_GENERIC; + case -EPERM: return TEEC_ERROR_ACCESS_DENIED; + case -ECANCELED: return TEEC_ERROR_CANCEL; + case -EBUSY: return TEEC_ERROR_ACCESS_CONFLICT; + case -E2BIG: return TEEC_ERROR_EXCESS_DATA; + case -EDOM: return TEEC_ERROR_BAD_FORMAT; + case -EINVAL: return TEEC_ERROR_BAD_PARAMETERS; + case -EBADFD: return TEEC_ERROR_BAD_STATE; + case -ENOENT: return TEEC_ERROR_ITEM_NOT_FOUND; + case -EPROTONOSUPPORT: return TEEC_ERROR_NOT_IMPLEMENTED; + case -ENOSYS: return TEEC_ERROR_NOT_SUPPORTED; + case -ENODATA: return TEEC_ERROR_NO_DATA; + case -ENOMEM: return TEEC_ERROR_OUT_OF_MEMORY; + case -EAGAIN: return TEEC_ERROR_BUSY; + case -EPIPE: return TEEC_ERROR_COMMUNICATION; + case -ECONNABORTED: return TEEC_ERROR_SECURITY; + case -EFBIG: return TEEC_ERROR_SHORT_BUFFER; + default: return TEEC_ERROR_GENERIC; + } +} + +/* Encode a TEEC time limit into an SChannel time limit. */ +static u64 TEEC_encode_timeout(const TEEC_TimeLimit *timeLimit) +{ + if (timeLimit == NULL) { + return (u64)-1; + } else { + return *timeLimit; + } +} + +/* Convert a timeout into a time limit in our internal format. */ +void TEEC_GetTimeLimit(TEEC_Context* sContext, + uint32_t nTimeout, /*ms from now*/ + TEEC_TimeLimit* sTimeLimit) +{ + /*Use the kernel time as the TEE time*/ + struct timeval now; + do_gettimeofday(&now); + *sTimeLimit = + ((TEEC_TimeLimit)now.tv_sec * 1000 + + now.tv_usec / 1000 + + nTimeout); +} + +#define TF_PARAM_TYPE_INPUT_FLAG 0x1 +#define TF_PARAM_TYPE_OUTPUT_FLAG 0x2 +#define TF_PARAM_TYPE_MEMREF_FLAG 0x4 +#define TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG 0x8 + +/* Update the type of a whole memref with the direction deduced from + the INPUT and OUTPUT flags of the memref. 
*/ +static void TEEC_encode_whole_memref_flags(u16 *param_types, + unsigned i, + u32 flags) +{ + if (flags & TEEC_MEM_INPUT) + *param_types |= TF_PARAM_TYPE_INPUT_FLAG << (4*i); + if (flags & TEEC_MEM_OUTPUT) + *param_types |= TF_PARAM_TYPE_OUTPUT_FLAG << (4*i); +} + +/* Encode the parameters and type of an operation from the TEE API format + into an SChannel message. */ +void TEEC_encode_parameters(u16 *param_types, + union tf_command_param *params, + TEEC_Operation *operation) +{ + unsigned i; + if (operation == NULL) { + *param_types = 0; + return; + } + *param_types = operation->paramTypes; + for (i = 0; i < 4; i++) { + unsigned ty = TF_GET_PARAM_TYPE(operation->paramTypes, i); + TEEC_Parameter *op = operation->params + i; + if (ty & TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG) { + TEEC_SharedMemory *sm = op->memref.parent; + params[i].memref.block = sm->imp._block; + if (ty == TEEC_MEMREF_WHOLE) { + TEEC_encode_whole_memref_flags(param_types, i, + sm->flags); + params[i].memref.size = sm->size; + params[i].memref.offset = 0; + } else { + params[i].memref.size = op->memref.size; + params[i].memref.offset = op->memref.offset; + } + } else if (ty & TF_PARAM_TYPE_MEMREF_FLAG) { + /* Set up what tf_map_temp_shmem (called by + tf_open_client_session and + tf_invoke_client_command) expects: + .descriptor and .offset to both be set to the + address of the buffer. */ + u32 address = (u32)op->tmpref.buffer; + params[i].temp_memref.descriptor = address; + params[i].temp_memref.size = op->tmpref.size; + params[i].temp_memref.offset = address; + } else if (ty & TF_PARAM_TYPE_INPUT_FLAG) { + params[i].value.a = op->value.a; + params[i].value.b = op->value.b; + } else { + /* output-only value or none, so nothing to do */ + } + } +} + +/* Decode updated parameters from an SChannel answer into the TEE API format. 
*/ +void TEEC_decode_parameters(union tf_answer_param *params, + TEEC_Operation *operation) +{ + unsigned i; + if (operation == NULL) { + return; + } + for (i = 0; i < 4; i++) { + unsigned ty = TF_GET_PARAM_TYPE(operation->paramTypes, i); + TEEC_Parameter *op = operation->params + i; + if (!(ty & TF_PARAM_TYPE_OUTPUT_FLAG)) { + /* input-only or none, so nothing to do */ + } else if (ty & TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG) { + op->memref.size = params[i].size.size; + } else if (ty & TF_PARAM_TYPE_MEMREF_FLAG) { + op->tmpref.size = params[i].size.size; + } else { + op->value.a = params[i].value.a; + op->value.b = params[i].value.b; + } + } +} + +/* Start a potentially-cancellable operation. */ +void TEEC_start_operation(TEEC_Context *context, + TEEC_Session *session, + TEEC_Operation *operation) +{ + if (operation != NULL) { + operation->imp._pSession = session; + /* Flush the assignment to imp._pSession, so that + RequestCancellation can read that field if started==1. */ + barrier(); + operation->started = 1; + } +} + +/* Mark a potentially-cancellable operation as finished. */ +void TEEC_finish_operation(TEEC_Operation *operation) +{ + if (operation != NULL) { + operation->started = 2; + barrier(); + } +} + + + +TEEC_Result TEEC_InitializeContext(const char *name, + TEEC_Context *context) +{ + int error; + struct tf_connection *connection = NULL; + + error = tf_open(tf_get_device(), NULL, &connection); + if (error != 0) { + dprintk(KERN_ERR "TEEC_InitializeContext(%s): " + "tf_open failed (error %d)!\n", + (name == NULL ? "(null)" : name), error); + goto error; + } + BUG_ON(connection == NULL); + connection->owner = TF_CONNECTION_OWNER_KERNEL; + + error = tf_create_device_context(connection); + if (error != 0) { + dprintk(KERN_ERR "TEEC_InitializeContext(%s): " + "tf_create_device_context failed (error %d)!\n", + (name == NULL ? 
"(null)" : name), error); + goto error; + } + + context->imp._connection = connection; + /*spin_lock_init(&context->imp._operations_lock);*/ + return S_SUCCESS; + +error: + tf_close(connection); + return TEEC_encode_error(error); +} + +void TEEC_FinalizeContext(TEEC_Context *context) +{ + struct tf_connection *connection = context->imp._connection; + dprintk(KERN_DEBUG "TEEC_FinalizeContext: connection=%p", connection); + tf_close(connection); + context->imp._connection = NULL; +} + +TEEC_Result TEEC_RegisterSharedMemory(TEEC_Context* context, + TEEC_SharedMemory* sharedMem) +{ + union tf_command command_message = {{0}}; + struct tf_command_register_shared_memory *cmd = + &command_message.register_shared_memory; + union tf_answer answer_message; + struct tf_answer_register_shared_memory *ans = + &answer_message.register_shared_memory; + TEEC_Result ret; + memset(&sharedMem->imp, 0, sizeof(sharedMem->imp)); + + cmd->message_size = TF_COMMAND_SIZE(*cmd); + cmd->message_type = TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY; + cmd->memory_flags = sharedMem->flags; + cmd->operation_id = (u32)&answer_message; + cmd->device_context = (u32)context; + /*cmd->block_id will be set by tf_register_shared_memory*/ + cmd->shared_mem_size = sharedMem->size; + cmd->shared_mem_start_offset = 0; + cmd->shared_mem_descriptors[0] = (u32)sharedMem->buffer; + + ret = TEEC_encode_error( + tf_register_shared_memory(context->imp._connection, + &command_message, + &answer_message)); + if (ret == TEEC_SUCCESS) { + ret = ans->error_code; + } + if (ret == S_SUCCESS) { + sharedMem->imp._context = context; + sharedMem->imp._block = ans->block; + } + return ret; +} + +#define TEEC_POINTER_TO_ZERO_SIZED_BUFFER ((void*)0x010) + +TEEC_Result TEEC_AllocateSharedMemory(TEEC_Context* context, + TEEC_SharedMemory* sharedMem) +{ + TEEC_Result ret; + dprintk(KERN_DEBUG "TEEC_AllocateSharedMemory: requested=%lu", + (unsigned long)sharedMem->size); + if (sharedMem->size == 0) { + /* Allocating 0 bytes must return a 
non-NULL pointer, but the + pointer doesn't need to be to memory that is mapped + anywhere. So we return a pointer into an unmapped page. */ + sharedMem->buffer = TEEC_POINTER_TO_ZERO_SIZED_BUFFER; + } else { + sharedMem->buffer = internal_vmalloc(sharedMem->size); + if (sharedMem->buffer == NULL) + { + dprintk(KERN_INFO "TEEC_AllocateSharedMemory: could not allocate %lu bytes", + (unsigned long)sharedMem->size); + return TEEC_ERROR_OUT_OF_MEMORY; + } + } + + ret = TEEC_RegisterSharedMemory(context, sharedMem); + if (ret == TEEC_SUCCESS) + { + sharedMem->imp._allocated = 1; + } + else + { + internal_vfree(sharedMem->buffer); + sharedMem->buffer = NULL; + memset(&sharedMem->imp, 0, sizeof(sharedMem->imp)); + } + return ret; +} + +void TEEC_ReleaseSharedMemory(TEEC_SharedMemory* sharedMem) +{ + TEEC_Context *context = sharedMem->imp._context; + union tf_command command_message = {{0}}; + struct tf_command_release_shared_memory *cmd = + &command_message.release_shared_memory; + union tf_answer answer_message; + + cmd->message_size = TF_COMMAND_SIZE(*cmd); + cmd->message_type = TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY; + cmd->operation_id = (u32)&answer_message; + cmd->device_context = (u32)context; + cmd->block = sharedMem->imp._block; + + tf_release_shared_memory(context->imp._connection, + &command_message, + &answer_message); + if (sharedMem->imp._allocated) { + if (sharedMem->buffer != TEEC_POINTER_TO_ZERO_SIZED_BUFFER) { + internal_vfree(sharedMem->buffer); + } + sharedMem->buffer = NULL; + sharedMem->size = 0; + } + memset(&sharedMem->imp, 0, sizeof(sharedMem->imp)); +} + +TEEC_Result TEEC_OpenSessionEx(TEEC_Context* context, + TEEC_Session* session, + const TEEC_TimeLimit* timeLimit, + const TEEC_UUID* destination, + u32 connectionMethod, + void* connectionData, + TEEC_Operation* operation, + u32* errorOrigin) +{ + union tf_command command_message = {{0}}; + struct tf_command_open_client_session *cmd = + &command_message.open_client_session; + union tf_answer 
answer_message = {{0}}; + struct tf_answer_open_client_session *ans = + &answer_message.open_client_session; + TEEC_Result ret; + + /* Note that we set the message size to the whole size of the + structure. tf_open_client_session will adjust it down + to trim the unnecessary portion of the login_data field. */ + cmd->message_size = TF_COMMAND_SIZE(*cmd); + cmd->message_type = TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION; + cmd->operation_id = (u32)&answer_message; + cmd->device_context = (u32)context; + cmd->cancellation_id = (u32)operation; + cmd->timeout = TEEC_encode_timeout(timeLimit); + memcpy(&cmd->destination_uuid, destination, + sizeof(cmd->destination_uuid)); + cmd->login_type = connectionMethod; + TEEC_encode_parameters(&cmd->param_types, cmd->params, operation); + + switch (connectionMethod) + { + case TEEC_LOGIN_PRIVILEGED: + case TEEC_LOGIN_PUBLIC: + break; + case TEEC_LOGIN_APPLICATION: + case TEEC_LOGIN_USER: + case TEEC_LOGIN_USER_APPLICATION: + case TEEC_LOGIN_GROUP: + case TEEC_LOGIN_GROUP_APPLICATION: + default: + return TEEC_ERROR_NOT_IMPLEMENTED; + } + + TEEC_start_operation(context, session, operation); + + ret = TEEC_encode_error( + tf_open_client_session(context->imp._connection, + &command_message, + &answer_message)); + + TEEC_finish_operation(operation); + TEEC_decode_parameters(ans->answers, operation); + if (errorOrigin != NULL) { + *errorOrigin = (ret == TEEC_SUCCESS ? 
+ ans->error_origin : + TEEC_ORIGIN_COMMS); + } + + if (ret == TEEC_SUCCESS) { + ret = ans->error_code; + } + if (ret == S_SUCCESS) { + session->imp._client_session = ans->client_session; + session->imp._context = context; + } + return ret; +} + +TEEC_Result TEEC_OpenSession(TEEC_Context* context, + TEEC_Session* session, + const TEEC_UUID* destination, + u32 connectionMethod, + void* connectionData, + TEEC_Operation* operation, + u32* errorOrigin) +{ + return TEEC_OpenSessionEx(context, session, + NULL, /*timeLimit*/ + destination, + connectionMethod, connectionData, + operation, errorOrigin); +} + +void TEEC_CloseSession(TEEC_Session* session) +{ + if (session != NULL) { + TEEC_Context *context = session->imp._context; + union tf_command command_message = {{0}}; + struct tf_command_close_client_session *cmd = + &command_message.close_client_session; + union tf_answer answer_message; + + cmd->message_size = TF_COMMAND_SIZE(*cmd); + cmd->message_type = TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION; + cmd->operation_id = (u32)&answer_message; + cmd->device_context = (u32)context; + cmd->client_session = session->imp._client_session; + + tf_close_client_session(context->imp._connection, + &command_message, + &answer_message); + + session->imp._client_session = 0; + session->imp._context = NULL; + } +} + +TEEC_Result TEEC_InvokeCommandEx(TEEC_Session* session, + const TEEC_TimeLimit* timeLimit, + u32 commandID, + TEEC_Operation* operation, + u32* errorOrigin) +{ + TEEC_Context *context = session->imp._context; + union tf_command command_message = {{0}}; + struct tf_command_invoke_client_command *cmd = + &command_message.invoke_client_command; + union tf_answer answer_message = {{0}}; + struct tf_answer_invoke_client_command *ans = + &answer_message.invoke_client_command; + TEEC_Result ret; + + cmd->message_size = TF_COMMAND_SIZE(*cmd); + cmd->message_type = TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND; + cmd->operation_id = (u32)&answer_message; + cmd->device_context = (u32)context; 
+ cmd->client_session = session->imp._client_session; + cmd->timeout = TEEC_encode_timeout(timeLimit); + cmd->cancellation_id = (u32)operation; + cmd->client_command_identifier = commandID; + TEEC_encode_parameters(&cmd->param_types, cmd->params, operation); + + TEEC_start_operation(context, session, operation); + + ret = TEEC_encode_error( + tf_invoke_client_command(context->imp._connection, + &command_message, + &answer_message)); + + TEEC_finish_operation(operation); + TEEC_decode_parameters(ans->answers, operation); + if (errorOrigin != NULL) { + *errorOrigin = (ret == TEEC_SUCCESS ? + ans->error_origin : + TEEC_ORIGIN_COMMS); + } + + if (ret == TEEC_SUCCESS) { + ret = ans->error_code; + } + return ret; +} + +TEEC_Result TEEC_InvokeCommand(TEEC_Session* session, + u32 commandID, + TEEC_Operation* operation, + u32* errorOrigin) +{ + return TEEC_InvokeCommandEx(session, + NULL, /*timeLimit*/ + commandID, + operation, errorOrigin); +} + +TEEC_Result TEEC_send_cancellation_message(TEEC_Context *context, + u32 client_session, + u32 cancellation_id) +{ + union tf_command command_message = {{0}}; + struct tf_command_cancel_client_operation *cmd = + &command_message.cancel_client_operation; + union tf_answer answer_message = {{0}}; + struct tf_answer_cancel_client_operation *ans = + &answer_message.cancel_client_operation; + TEEC_Result ret; + + cmd->message_size = TF_COMMAND_SIZE(*cmd); + cmd->message_type = TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND; + cmd->operation_id = (u32)&answer_message; + cmd->device_context = (u32)context; + cmd->client_session = client_session; + cmd->cancellation_id = cancellation_id; + + ret = TEEC_encode_error( + tf_cancel_client_command(context->imp._connection, + &command_message, + &answer_message)); + + if (ret == TEEC_SUCCESS) { + ret = ans->error_code; + } + return ret; +} + +void TEEC_RequestCancellation(TEEC_Operation* operation) +{ + TEEC_Result ret; + while (1) { + u32 state = operation->started; + switch (state) { + case 0: /*The 
operation data structure isn't initialized yet*/ + break; + + case 1: /*operation is in progress in the client*/ + ret = TEEC_send_cancellation_message( + operation->imp._pSession->imp._context, + operation->imp._pSession->imp._client_session, + (u32)operation); + if (ret == TEEC_SUCCESS) { + /*The cancellation was successful*/ + return; + } + /* The command has either not reached the secure world + yet or has completed already. Either way, retry. */ + break; + + case 2: /*operation has completed already*/ + return; + } + /* Since we're busy-waiting for the operation to be started + or finished, yield. */ + schedule(); + } +} + +EXPORT_SYMBOL(TEEC_encode_error); +EXPORT_SYMBOL(TEEC_decode_error); +EXPORT_SYMBOL(TEEC_InitializeContext); +EXPORT_SYMBOL(TEEC_FinalizeContext); +EXPORT_SYMBOL(TEEC_RegisterSharedMemory); +EXPORT_SYMBOL(TEEC_AllocateSharedMemory); +EXPORT_SYMBOL(TEEC_ReleaseSharedMemory); +EXPORT_SYMBOL(TEEC_OpenSession); +EXPORT_SYMBOL(TEEC_CloseSession); +EXPORT_SYMBOL(TEEC_InvokeCommand); +EXPORT_SYMBOL(TEEC_RequestCancellation); + +#endif /* defined(CONFIG_TF_TEEC) */ diff --git a/security/smc/omap4/scxlnx_util.c b/security/smc/tf_teec.h index 90cd831..28b3287 100644 --- a/security/smc/omap4/scxlnx_util.c +++ b/security/smc/tf_teec.h @@ -1,5 +1,5 @@ -/* - * Copyright (c) 2006-2010 Trusted Logic S.A. +/** + * Copyright (c) 2011 Trusted Logic S.A. * All Rights Reserved. 
* * This program is free software; you can redistribute it and/or @@ -16,30 +16,18 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA */ -#include <linux/mman.h> -#include "scxlnx_util.h" -void *internal_kmalloc(size_t nSize, int nPriority) -{ - void *pResult; - struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice(); +#ifndef __TF_TEEC_H__ +#define __TF_TEEC_H__ - pResult = kmalloc(nSize, nPriority); +#ifdef CONFIG_TF_TEEC - if (pResult != NULL) - atomic_inc( - &pDevice->sDeviceStats.stat_memories_allocated); +#include "tf_defs.h" +#include "tee_client_api.h" - return pResult; -} +TEEC_Result TEEC_encode_error(int err); +int TEEC_decode_error(TEEC_Result ret); -void internal_kfree(void *pMemory) -{ - struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice(); - - if (pMemory != NULL) - atomic_dec( - &pDevice->sDeviceStats.stat_memories_allocated); - return kfree(pMemory); -} +#endif /* defined(CONFIG_TF_TEEC) */ +#endif /* !defined(__TF_TEEC_H__) */ diff --git a/security/smc/tf_util.c b/security/smc/tf_util.c new file mode 100644 index 0000000..ec9941b --- /dev/null +++ b/security/smc/tf_util.c @@ -0,0 +1,1145 @@ +/** + * Copyright (c) 2011 Trusted Logic S.A. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#include <linux/mman.h> +#include "tf_util.h" + +/*---------------------------------------------------------------------------- + * Debug printing routines + *----------------------------------------------------------------------------*/ +#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT + +void address_cache_property(unsigned long va) +{ + unsigned long pa; + unsigned long inner; + unsigned long outer; + + asm volatile ("mcr p15, 0, %0, c7, c8, 0" : : "r" (va)); + asm volatile ("mrc p15, 0, %0, c7, c4, 0" : "=r" (pa)); + + dprintk(KERN_INFO "VA:%x, PA:%x\n", + (unsigned int) va, + (unsigned int) pa); + + if (pa & 1) { + dprintk(KERN_INFO "Prop Error\n"); + return; + } + + outer = (pa >> 2) & 3; + dprintk(KERN_INFO "\touter : %x", (unsigned int) outer); + + switch (outer) { + case 3: + dprintk(KERN_INFO "Write-Back, no Write-Allocate\n"); + break; + case 2: + dprintk(KERN_INFO "Write-Through, no Write-Allocate.\n"); + break; + case 1: + dprintk(KERN_INFO "Write-Back, Write-Allocate.\n"); + break; + case 0: + dprintk(KERN_INFO "Non-cacheable.\n"); + break; + } + + inner = (pa >> 4) & 7; + dprintk(KERN_INFO "\tinner : %x", (unsigned int)inner); + + switch (inner) { + case 7: + dprintk(KERN_INFO "Write-Back, no Write-Allocate\n"); + break; + case 6: + dprintk(KERN_INFO "Write-Through.\n"); + break; + case 5: + dprintk(KERN_INFO "Write-Back, Write-Allocate.\n"); + break; + case 3: + dprintk(KERN_INFO "Device.\n"); + break; + case 1: + dprintk(KERN_INFO "Strongly-ordered.\n"); + break; + case 0: + dprintk(KERN_INFO "Non-cacheable.\n"); + break; + } + + if (pa & 0x00000002) + dprintk(KERN_INFO "SuperSection.\n"); + if (pa & 0x00000080) + dprintk(KERN_INFO "Memory is shareable.\n"); + else + dprintk(KERN_INFO "Memory is non-shareable.\n"); + + if (pa & 
0x00000200) + dprintk(KERN_INFO "Non-secure.\n"); +} + +#ifdef CONFIG_BENCH_SECURE_CYCLE + +#define LOOP_SIZE (100000) + +void run_bogo_mips(void) +{ + uint32_t cycles; + void *address = &run_bogo_mips; + + dprintk(KERN_INFO "BogoMIPS:\n"); + + setup_counters(); + cycles = run_code_speed(LOOP_SIZE); + dprintk(KERN_INFO "%u cycles with code access\n", cycles); + cycles = run_data_speed(LOOP_SIZE, (unsigned long)address); + dprintk(KERN_INFO "%u cycles to access %x\n", cycles, + (unsigned int) address); +} + +#endif /* CONFIG_BENCH_SECURE_CYCLE */ + +/* + * Dump the L1 shared buffer. + */ +void tf_dump_l1_shared_buffer(struct tf_l1_shared_buffer *buffer) +{ + dprintk(KERN_INFO + "buffer@%p:\n" + " config_flag_s=%08X\n" + " version_description=%64s\n" + " status_s=%08X\n" + " sync_serial_n=%08X\n" + " sync_serial_s=%08X\n" + " time_n[0]=%016llX\n" + " time_n[1]=%016llX\n" + " timeout_s[0]=%016llX\n" + " timeout_s[1]=%016llX\n" + " first_command=%08X\n" + " first_free_command=%08X\n" + " first_answer=%08X\n" + " first_free_answer=%08X\n\n", + buffer, + buffer->config_flag_s, + buffer->version_description, + buffer->status_s, + buffer->sync_serial_n, + buffer->sync_serial_s, + buffer->time_n[0], + buffer->time_n[1], + buffer->timeout_s[0], + buffer->timeout_s[1], + buffer->first_command, + buffer->first_free_command, + buffer->first_answer, + buffer->first_free_answer); +} + + +/* + * Dump the specified SChannel message using dprintk. 
+ */ +void tf_dump_command(union tf_command *command) +{ + u32 i; + + dprintk(KERN_INFO "message@%p:\n", command); + + switch (command->header.message_type) { + case TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT: + dprintk(KERN_INFO + " message_size = 0x%02X\n" + " message_type = 0x%02X " + "TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT\n" + " operation_id = 0x%08X\n" + " device_context_id = 0x%08X\n", + command->header.message_size, + command->header.message_type, + command->header.operation_id, + command->create_device_context.device_context_id + ); + break; + + case TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT: + dprintk(KERN_INFO + " message_size = 0x%02X\n" + " message_type = 0x%02X " + "TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT\n" + " operation_id = 0x%08X\n" + " device_context = 0x%08X\n", + command->header.message_size, + command->header.message_type, + command->header.operation_id, + command->destroy_device_context.device_context); + break; + + case TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION: + dprintk(KERN_INFO + " message_size = 0x%02X\n" + " message_type = 0x%02X " + "TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION\n" + " param_types = 0x%04X\n" + " operation_id = 0x%08X\n" + " device_context = 0x%08X\n" + " cancellation_id = 0x%08X\n" + " timeout = 0x%016llX\n" + " destination_uuid = " + "%08X-%04X-%04X-%02X%02X-" + "%02X%02X%02X%02X%02X%02X\n", + command->header.message_size, + command->header.message_type, + command->open_client_session.param_types, + command->header.operation_id, + command->open_client_session.device_context, + command->open_client_session.cancellation_id, + command->open_client_session.timeout, + command->open_client_session.destination_uuid. + time_low, + command->open_client_session.destination_uuid. + time_mid, + command->open_client_session.destination_uuid. + time_hi_and_version, + command->open_client_session.destination_uuid. + clock_seq_and_node[0], + command->open_client_session.destination_uuid. 
+ clock_seq_and_node[1], + command->open_client_session.destination_uuid. + clock_seq_and_node[2], + command->open_client_session.destination_uuid. + clock_seq_and_node[3], + command->open_client_session.destination_uuid. + clock_seq_and_node[4], + command->open_client_session.destination_uuid. + clock_seq_and_node[5], + command->open_client_session.destination_uuid. + clock_seq_and_node[6], + command->open_client_session.destination_uuid. + clock_seq_and_node[7] + ); + + for (i = 0; i < 4; i++) { + uint32_t *param = (uint32_t *) &command-> + open_client_session.params[i]; + dprintk(KERN_INFO " params[%d] = " + "0x%08X:0x%08X:0x%08X\n", + i, param[0], param[1], param[2]); + } + + switch (TF_LOGIN_GET_MAIN_TYPE( + command->open_client_session.login_type)) { + case TF_LOGIN_PUBLIC: + dprintk( + KERN_INFO " login_type = " + "TF_LOGIN_PUBLIC\n"); + break; + case TF_LOGIN_USER: + dprintk( + KERN_INFO " login_type = " + "TF_LOGIN_USER\n"); + break; + case TF_LOGIN_GROUP: + dprintk( + KERN_INFO " login_type = " + "TF_LOGIN_GROUP\n"); + break; + case TF_LOGIN_APPLICATION: + dprintk( + KERN_INFO " login_type = " + "TF_LOGIN_APPLICATION\n"); + break; + case TF_LOGIN_APPLICATION_USER: + dprintk( + KERN_INFO " login_type = " + "TF_LOGIN_APPLICATION_USER\n"); + break; + case TF_LOGIN_APPLICATION_GROUP: + dprintk( + KERN_INFO " login_type = " + "TF_LOGIN_APPLICATION_GROUP\n"); + break; + case TF_LOGIN_AUTHENTICATION: + dprintk( + KERN_INFO " login_type = " + "TF_LOGIN_AUTHENTICATION\n"); + break; + case TF_LOGIN_PRIVILEGED: + dprintk( + KERN_INFO " login_type = " + "TF_LOGIN_PRIVILEGED\n"); + break; + case TF_LOGIN_PRIVILEGED_KERNEL: + dprintk( + KERN_INFO " login_type = " + "TF_LOGIN_PRIVILEGED_KERNEL\n"); + break; + default: + dprintk( + KERN_ERR " login_type = " + "0x%08X (Unknown login type)\n", + command->open_client_session.login_type); + break; + } + + dprintk( + KERN_INFO " login_data = "); + for (i = 0; i < 20; i++) + dprintk( + KERN_INFO "%d", + 
command->open_client_session. + login_data[i]); + dprintk("\n"); + break; + + case TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION: + dprintk(KERN_INFO + " message_size = 0x%02X\n" + " message_type = 0x%02X " + "TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION\n" + " operation_id = 0x%08X\n" + " device_context = 0x%08X\n" + " client_session = 0x%08X\n", + command->header.message_size, + command->header.message_type, + command->header.operation_id, + command->close_client_session.device_context, + command->close_client_session.client_session + ); + break; + + case TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY: + dprintk(KERN_INFO + " message_size = 0x%02X\n" + " message_type = 0x%02X " + "TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY\n" + " memory_flags = 0x%04X\n" + " operation_id = 0x%08X\n" + " device_context = 0x%08X\n" + " block_id = 0x%08X\n" + " shared_mem_size = 0x%08X\n" + " shared_mem_start_offset = 0x%08X\n" + " shared_mem_descriptors[0] = 0x%08X\n" + " shared_mem_descriptors[1] = 0x%08X\n" + " shared_mem_descriptors[2] = 0x%08X\n" + " shared_mem_descriptors[3] = 0x%08X\n" + " shared_mem_descriptors[4] = 0x%08X\n" + " shared_mem_descriptors[5] = 0x%08X\n" + " shared_mem_descriptors[6] = 0x%08X\n" + " shared_mem_descriptors[7] = 0x%08X\n", + command->header.message_size, + command->header.message_type, + command->register_shared_memory.memory_flags, + command->header.operation_id, + command->register_shared_memory.device_context, + command->register_shared_memory.block_id, + command->register_shared_memory.shared_mem_size, + command->register_shared_memory. + shared_mem_start_offset, + command->register_shared_memory. + shared_mem_descriptors[0], + command->register_shared_memory. + shared_mem_descriptors[1], + command->register_shared_memory. + shared_mem_descriptors[2], + command->register_shared_memory. + shared_mem_descriptors[3], + command->register_shared_memory. + shared_mem_descriptors[4], + command->register_shared_memory. 
+ shared_mem_descriptors[5], + command->register_shared_memory. + shared_mem_descriptors[6], + command->register_shared_memory. + shared_mem_descriptors[7]); + break; + + case TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY: + dprintk(KERN_INFO + " message_size = 0x%02X\n" + " message_type = 0x%02X " + "TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY\n" + " operation_id = 0x%08X\n" + " device_context = 0x%08X\n" + " block = 0x%08X\n", + command->header.message_size, + command->header.message_type, + command->header.operation_id, + command->release_shared_memory.device_context, + command->release_shared_memory.block); + break; + + case TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND: + dprintk(KERN_INFO + " message_size = 0x%02X\n" + " message_type = 0x%02X " + "TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND\n" + " param_types = 0x%04X\n" + " operation_id = 0x%08X\n" + " device_context = 0x%08X\n" + " client_session = 0x%08X\n" + " timeout = 0x%016llX\n" + " cancellation_id = 0x%08X\n" + " client_command_identifier = 0x%08X\n", + command->header.message_size, + command->header.message_type, + command->invoke_client_command.param_types, + command->header.operation_id, + command->invoke_client_command.device_context, + command->invoke_client_command.client_session, + command->invoke_client_command.timeout, + command->invoke_client_command.cancellation_id, + command->invoke_client_command. 
+ client_command_identifier + ); + + for (i = 0; i < 4; i++) { + uint32_t *param = (uint32_t *) &command-> + open_client_session.params[i]; + dprintk(KERN_INFO " params[%d] = " + "0x%08X:0x%08X:0x%08X\n", i, + param[0], param[1], param[2]); + } + break; + + case TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND: + dprintk(KERN_INFO + " message_size = 0x%02X\n" + " message_type = 0x%02X " + "TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND\n" + " operation_id = 0x%08X\n" + " device_context = 0x%08X\n" + " client_session = 0x%08X\n", + command->header.message_size, + command->header.message_type, + command->header.operation_id, + command->cancel_client_operation.device_context, + command->cancel_client_operation.client_session); + break; + + case TF_MESSAGE_TYPE_MANAGEMENT: + dprintk(KERN_INFO + " message_size = 0x%02X\n" + " message_type = 0x%02X " + "TF_MESSAGE_TYPE_MANAGEMENT\n" + " operation_id = 0x%08X\n" + " command = 0x%08X\n" + " w3b_size = 0x%08X\n" + " w3b_start_offset = 0x%08X\n", + command->header.message_size, + command->header.message_type, + command->header.operation_id, + command->management.command, + command->management.w3b_size, + command->management.w3b_start_offset); + break; + + default: + dprintk( + KERN_ERR " message_type = 0x%08X " + "(Unknown message type)\n", + command->header.message_type); + break; + } +} + + +/* + * Dump the specified SChannel answer using dprintk. 
+ */ +void tf_dump_answer(union tf_answer *answer) +{ + u32 i; + dprintk( + KERN_INFO "answer@%p:\n", + answer); + + switch (answer->header.message_type) { + case TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT: + dprintk(KERN_INFO + " message_size = 0x%02X\n" + " message_type = 0x%02X " + "tf_answer_create_device_context\n" + " operation_id = 0x%08X\n" + " error_code = 0x%08X\n" + " device_context = 0x%08X\n", + answer->header.message_size, + answer->header.message_type, + answer->header.operation_id, + answer->create_device_context.error_code, + answer->create_device_context.device_context); + break; + + case TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT: + dprintk(KERN_INFO + " message_size = 0x%02X\n" + " message_type = 0x%02X " + "ANSWER_DESTROY_DEVICE_CONTEXT\n" + " operation_id = 0x%08X\n" + " error_code = 0x%08X\n" + " device_context_id = 0x%08X\n", + answer->header.message_size, + answer->header.message_type, + answer->header.operation_id, + answer->destroy_device_context.error_code, + answer->destroy_device_context.device_context_id); + break; + + + case TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION: + dprintk(KERN_INFO + " message_size = 0x%02X\n" + " message_type = 0x%02X " + "tf_answer_open_client_session\n" + " error_origin = 0x%02X\n" + " operation_id = 0x%08X\n" + " error_code = 0x%08X\n" + " client_session = 0x%08X\n", + answer->header.message_size, + answer->header.message_type, + answer->open_client_session.error_origin, + answer->header.operation_id, + answer->open_client_session.error_code, + answer->open_client_session.client_session); + for (i = 0; i < 4; i++) { + dprintk(KERN_INFO " answers[%d]=0x%08X:0x%08X\n", + i, + answer->open_client_session.answers[i]. + value.a, + answer->open_client_session.answers[i]. 
+ value.b); + } + break; + + case TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION: + dprintk(KERN_INFO + " message_size = 0x%02X\n" + " message_type = 0x%02X " + "ANSWER_CLOSE_CLIENT_SESSION\n" + " operation_id = 0x%08X\n" + " error_code = 0x%08X\n", + answer->header.message_size, + answer->header.message_type, + answer->header.operation_id, + answer->close_client_session.error_code); + break; + + case TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY: + dprintk(KERN_INFO + " message_size = 0x%02X\n" + " message_type = 0x%02X " + "tf_answer_register_shared_memory\n" + " operation_id = 0x%08X\n" + " error_code = 0x%08X\n" + " block = 0x%08X\n", + answer->header.message_size, + answer->header.message_type, + answer->header.operation_id, + answer->register_shared_memory.error_code, + answer->register_shared_memory.block); + break; + + case TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY: + dprintk(KERN_INFO + " message_size = 0x%02X\n" + " message_type = 0x%02X " + "ANSWER_RELEASE_SHARED_MEMORY\n" + " operation_id = 0x%08X\n" + " error_code = 0x%08X\n" + " block_id = 0x%08X\n", + answer->header.message_size, + answer->header.message_type, + answer->header.operation_id, + answer->release_shared_memory.error_code, + answer->release_shared_memory.block_id); + break; + + case TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND: + dprintk(KERN_INFO + " message_size = 0x%02X\n" + " message_type = 0x%02X " + "tf_answer_invoke_client_command\n" + " error_origin = 0x%02X\n" + " operation_id = 0x%08X\n" + " error_code = 0x%08X\n", + answer->header.message_size, + answer->header.message_type, + answer->invoke_client_command.error_origin, + answer->header.operation_id, + answer->invoke_client_command.error_code + ); + for (i = 0; i < 4; i++) { + dprintk(KERN_INFO " answers[%d]=0x%08X:0x%08X\n", + i, + answer->invoke_client_command.answers[i]. + value.a, + answer->invoke_client_command.answers[i]. 
+ value.b); + } + break; + + case TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND: + dprintk(KERN_INFO + " message_size = 0x%02X\n" + " message_type = 0x%02X " + "TF_ANSWER_CANCEL_CLIENT_COMMAND\n" + " operation_id = 0x%08X\n" + " error_code = 0x%08X\n", + answer->header.message_size, + answer->header.message_type, + answer->header.operation_id, + answer->cancel_client_operation.error_code); + break; + + case TF_MESSAGE_TYPE_MANAGEMENT: + dprintk(KERN_INFO + " message_size = 0x%02X\n" + " message_type = 0x%02X " + "TF_MESSAGE_TYPE_MANAGEMENT\n" + " operation_id = 0x%08X\n" + " error_code = 0x%08X\n", + answer->header.message_size, + answer->header.message_type, + answer->header.operation_id, + answer->header.error_code); + break; + + default: + dprintk( + KERN_ERR " message_type = 0x%02X " + "(Unknown message type)\n", + answer->header.message_type); + break; + + } +} + +#endif /* defined(TF_DRIVER_DEBUG_SUPPORT) */ + +/*---------------------------------------------------------------------------- + * SHA-1 implementation + * This is taken from the Linux kernel source crypto/sha1.c + *----------------------------------------------------------------------------*/ + +struct sha1_ctx { + u64 count; + u32 state[5]; + u8 buffer[64]; +}; + +static inline u32 rol(u32 value, u32 bits) +{ + return ((value) << (bits)) | ((value) >> (32 - (bits))); +} + +/* blk0() and blk() perform the initial expand. 
*/ +/* I got the idea of expanding during the round function from SSLeay */ +#define blk0(i) block32[i] + +#define blk(i) (block32[i & 15] = rol( \ + block32[(i + 13) & 15] ^ block32[(i + 8) & 15] ^ \ + block32[(i + 2) & 15] ^ block32[i & 15], 1)) + +/* (R0+R1), R2, R3, R4 are the different operations used in SHA1 */ +#define R0(v, w, x, y, z, i) do { \ + z += ((w & (x ^ y)) ^ y) + blk0(i) + 0x5A827999 + rol(v, 5); \ + w = rol(w, 30); } while (0) + +#define R1(v, w, x, y, z, i) do { \ + z += ((w & (x ^ y)) ^ y) + blk(i) + 0x5A827999 + rol(v, 5); \ + w = rol(w, 30); } while (0) + +#define R2(v, w, x, y, z, i) do { \ + z += (w ^ x ^ y) + blk(i) + 0x6ED9EBA1 + rol(v, 5); \ + w = rol(w, 30); } while (0) + +#define R3(v, w, x, y, z, i) do { \ + z += (((w | x) & y) | (w & x)) + blk(i) + 0x8F1BBCDC + rol(v, 5); \ + w = rol(w, 30); } while (0) + +#define R4(v, w, x, y, z, i) do { \ + z += (w ^ x ^ y) + blk(i) + 0xCA62C1D6 + rol(v, 5); \ + w = rol(w, 30); } while (0) + + +/* Hash a single 512-bit block. This is the core of the algorithm. */ +static void sha1_transform(u32 *state, const u8 *in) +{ + u32 a, b, c, d, e; + u32 block32[16]; + + /* convert/copy data to workspace */ + for (a = 0; a < sizeof(block32)/sizeof(u32); a++) + block32[a] = ((u32) in[4 * a]) << 24 | + ((u32) in[4 * a + 1]) << 16 | + ((u32) in[4 * a + 2]) << 8 | + ((u32) in[4 * a + 3]); + + /* Copy context->state[] to working vars */ + a = state[0]; + b = state[1]; + c = state[2]; + d = state[3]; + e = state[4]; + + /* 4 rounds of 20 operations each. Loop unrolled. 
*/ + R0(a, b, c, d, e, 0); R0(e, a, b, c, d, 1); + R0(d, e, a, b, c, 2); R0(c, d, e, a, b, 3); + R0(b, c, d, e, a, 4); R0(a, b, c, d, e, 5); + R0(e, a, b, c, d, 6); R0(d, e, a, b, c, 7); + R0(c, d, e, a, b, 8); R0(b, c, d, e, a, 9); + R0(a, b, c, d, e, 10); R0(e, a, b, c, d, 11); + R0(d, e, a, b, c, 12); R0(c, d, e, a, b, 13); + R0(b, c, d, e, a, 14); R0(a, b, c, d, e, 15); + + R1(e, a, b, c, d, 16); R1(d, e, a, b, c, 17); + R1(c, d, e, a, b, 18); R1(b, c, d, e, a, 19); + + R2(a, b, c, d, e, 20); R2(e, a, b, c, d, 21); + R2(d, e, a, b, c, 22); R2(c, d, e, a, b, 23); + R2(b, c, d, e, a, 24); R2(a, b, c, d, e, 25); + R2(e, a, b, c, d, 26); R2(d, e, a, b, c, 27); + R2(c, d, e, a, b, 28); R2(b, c, d, e, a, 29); + R2(a, b, c, d, e, 30); R2(e, a, b, c, d, 31); + R2(d, e, a, b, c, 32); R2(c, d, e, a, b, 33); + R2(b, c, d, e, a, 34); R2(a, b, c, d, e, 35); + R2(e, a, b, c, d, 36); R2(d, e, a, b, c, 37); + R2(c, d, e, a, b, 38); R2(b, c, d, e, a, 39); + + R3(a, b, c, d, e, 40); R3(e, a, b, c, d, 41); + R3(d, e, a, b, c, 42); R3(c, d, e, a, b, 43); + R3(b, c, d, e, a, 44); R3(a, b, c, d, e, 45); + R3(e, a, b, c, d, 46); R3(d, e, a, b, c, 47); + R3(c, d, e, a, b, 48); R3(b, c, d, e, a, 49); + R3(a, b, c, d, e, 50); R3(e, a, b, c, d, 51); + R3(d, e, a, b, c, 52); R3(c, d, e, a, b, 53); + R3(b, c, d, e, a, 54); R3(a, b, c, d, e, 55); + R3(e, a, b, c, d, 56); R3(d, e, a, b, c, 57); + R3(c, d, e, a, b, 58); R3(b, c, d, e, a, 59); + + R4(a, b, c, d, e, 60); R4(e, a, b, c, d, 61); + R4(d, e, a, b, c, 62); R4(c, d, e, a, b, 63); + R4(b, c, d, e, a, 64); R4(a, b, c, d, e, 65); + R4(e, a, b, c, d, 66); R4(d, e, a, b, c, 67); + R4(c, d, e, a, b, 68); R4(b, c, d, e, a, 69); + R4(a, b, c, d, e, 70); R4(e, a, b, c, d, 71); + R4(d, e, a, b, c, 72); R4(c, d, e, a, b, 73); + R4(b, c, d, e, a, 74); R4(a, b, c, d, e, 75); + R4(e, a, b, c, d, 76); R4(d, e, a, b, c, 77); + R4(c, d, e, a, b, 78); R4(b, c, d, e, a, 79); + + /* Add the working vars back into context.state[] */ + state[0] += a; + 
state[1] += b; + state[2] += c; + state[3] += d; + state[4] += e; + /* Wipe variables */ + a = b = c = d = e = 0; + memset(block32, 0x00, sizeof(block32)); +} + + +static void sha1_init(void *ctx) +{ + struct sha1_ctx *sctx = ctx; + static const struct sha1_ctx initstate = { + 0, + { 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0 }, + { 0, } + }; + + *sctx = initstate; +} + + +static void sha1_update(void *ctx, const u8 *data, unsigned int len) +{ + struct sha1_ctx *sctx = ctx; + unsigned int i, j; + + j = (sctx->count >> 3) & 0x3f; + sctx->count += len << 3; + + if ((j + len) > 63) { + memcpy(&sctx->buffer[j], data, (i = 64 - j)); + sha1_transform(sctx->state, sctx->buffer); + for ( ; i + 63 < len; i += 64) + sha1_transform(sctx->state, &data[i]); + j = 0; + } else + i = 0; + memcpy(&sctx->buffer[j], &data[i], len - i); +} + + +/* Add padding and return the message digest. */ +static void sha1_final(void *ctx, u8 *out) +{ + struct sha1_ctx *sctx = ctx; + u32 i, j, index, padlen; + u64 t; + u8 bits[8] = { 0, }; + static const u8 padding[64] = { 0x80, }; + + t = sctx->count; + bits[7] = 0xff & t; t >>= 8; + bits[6] = 0xff & t; t >>= 8; + bits[5] = 0xff & t; t >>= 8; + bits[4] = 0xff & t; t >>= 8; + bits[3] = 0xff & t; t >>= 8; + bits[2] = 0xff & t; t >>= 8; + bits[1] = 0xff & t; t >>= 8; + bits[0] = 0xff & t; + + /* Pad out to 56 mod 64 */ + index = (sctx->count >> 3) & 0x3f; + padlen = (index < 56) ? 
(56 - index) : ((64+56) - index);
+	sha1_update(sctx, padding, padlen);
+
+	/* Append length */
+	sha1_update(sctx, bits, sizeof(bits));
+
+	/* Store state in digest */
+	for (i = j = 0; i < 5; i++, j += 4) {
+		u32 t2 = sctx->state[i];
+		out[j+3] = t2 & 0xff; t2 >>= 8;
+		out[j+2] = t2 & 0xff; t2 >>= 8;
+		out[j+1] = t2 & 0xff; t2 >>= 8;
+		out[j] = t2 & 0xff;
+	}
+
+	/* Wipe context */
+	memset(sctx, 0, sizeof(*sctx));
+}
+
+
+
+
+/*----------------------------------------------------------------------------
+ * Process identification
+ *----------------------------------------------------------------------------*/
+
+/* This function generates a process hash for authentication */
+int tf_get_current_process_hash(void *hash)
+{
+	int result = -ENOENT;
+	void *buffer;
+	struct mm_struct *mm;
+	struct vm_area_struct *vma;
+
+	buffer = internal_kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (buffer == NULL) {
+		dprintk(
+			KERN_ERR "tf_get_current_process_hash:"
+			" Out of memory for buffer!\n");
+		return -ENOMEM;
+	}
+
+	mm = current->mm;
+
+	down_read(&(mm->mmap_sem));
+	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
+		if ((vma->vm_flags & VM_EXECUTABLE) != 0 && vma->vm_file
+				!= NULL) {
+			struct dentry *dentry;
+			unsigned long start;
+			unsigned long cur;
+			unsigned long end;
+			struct sha1_ctx sha1;
+
+			dentry = dget(vma->vm_file->f_dentry);
+
+			dprintk(
+				KERN_DEBUG "tf_get_current_process_hash: "
+				"Found executable VMA for inode %lu "
+				"(%lu bytes).\n",
+				dentry->d_inode->i_ino,
+				(unsigned long) (dentry->d_inode->
+					i_size));
+
+			start = do_mmap(vma->vm_file, 0,
+				dentry->d_inode->i_size,
+				PROT_READ | PROT_WRITE | PROT_EXEC,
+				MAP_PRIVATE, 0);
+			if (IS_ERR_VALUE(start)) {
+				dprintk(
+					KERN_ERR "tf_get_current_process_hash"
+					"Hash: do_mmap failed (error %d)!\n",
+					(int) start);
+				dput(dentry);
+				result = -EFAULT;
+				goto vma_out;
+			}
+
+			end = start + dentry->d_inode->i_size;
+
+			sha1_init(&sha1);
+			cur = start;
+			while (cur < end) {
+				unsigned long chunk;
+
+				chunk = end - cur;
+				if (chunk > PAGE_SIZE)
+					chunk = PAGE_SIZE;
+				if (copy_from_user(buffer, (const void *) cur,
+					chunk) != 0) {
+					dprintk(
+						KERN_ERR "tf_get_current_"
+						"process_hash: copy_from_user "
+						"failed!\n");
+					result = -EINVAL;
+					(void) do_munmap(mm, start,
+						dentry->d_inode->i_size);
+					dput(dentry);
+					goto vma_out;
+				}
+				sha1_update(&sha1, buffer, chunk);
+				cur += chunk;
+			}
+			sha1_final(&sha1, hash);
+			result = 0;
+
+			(void) do_munmap(mm, start, dentry->d_inode->i_size);
+			dput(dentry);
+			break;
+		}
+	}
+vma_out:
+	up_read(&(mm->mmap_sem));
+
+	internal_kfree(buffer);
+
+	if (result == -ENOENT)
+		dprintk(
+			KERN_ERR "tf_get_current_process_hash: "
+			"No executable VMA found for process!\n");
+	return result;
+}
+
+#ifndef CONFIG_ANDROID
+/* This function hashes the path of the current application.
+ * If data = NULL, nothing else is added to the hash
+   else add data to the hash
+ */
+int tf_hash_application_path_and_data(char *buffer, void *data,
+	u32 data_len)
+{
+	int result = -ENOENT;
+	char *tmp = NULL;
+	struct mm_struct *mm;
+	struct vm_area_struct *vma;
+
+	tmp = internal_kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (tmp == NULL) {
+		result = -ENOMEM;
+		goto end;
+	}
+
+	mm = current->mm;
+
+	down_read(&(mm->mmap_sem));
+	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
+		if ((vma->vm_flags & VM_EXECUTABLE) != 0
+				&& vma->vm_file != NULL) {
+			struct path *path;
+			char *endpath;
+			size_t pathlen;
+			struct sha1_ctx sha1;
+			u8 hash[SHA1_DIGEST_SIZE];
+
+			path = &vma->vm_file->f_path;
+
+			endpath = d_path(path, tmp, PAGE_SIZE);
+			if (IS_ERR(endpath)) {
+				result = PTR_ERR(endpath);
+				up_read(&(mm->mmap_sem));
+				goto end;
+			}
+			pathlen = (tmp + PAGE_SIZE) - endpath;
+
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+			{
+				char *c;
+				dprintk(KERN_DEBUG "current process path = ");
+				for (c = endpath;
+				     c < tmp + PAGE_SIZE;
+				     c++)
+					dprintk("%c", *c);
+
+				dprintk(", uid=%d, euid=%d\n", current_uid(),
+					current_euid());
+			}
+#endif /* defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */
+
+			sha1_init(&sha1);
+			sha1_update(&sha1, endpath, pathlen);
+			if (data != NULL) {
+				dprintk(KERN_INFO "current process path: "
+					"Hashing additional data\n");
+				sha1_update(&sha1, data, data_len);
+			}
+			sha1_final(&sha1, hash);
+			memcpy(buffer, hash, sizeof(hash));
+
+			result = 0;
+
+			break;
+		}
+	}
+	up_read(&(mm->mmap_sem));
+
+end:
+	if (tmp != NULL)
+		internal_kfree(tmp);
+
+	return result;
+}
+#endif /* !CONFIG_ANDROID */
+
+void *internal_kmalloc(size_t size, int priority)
+{
+	void *ptr;
+	struct tf_device *dev = tf_get_device();
+
+	ptr = kmalloc(size, priority);
+
+	if (ptr != NULL)
+		atomic_inc(
+			&dev->stats.stat_memories_allocated);
+
+	return ptr;
+}
+
+void internal_kfree(void *ptr)
+{
+	struct tf_device *dev = tf_get_device();
+
+	if (ptr != NULL)
+		atomic_dec(
+			&dev->stats.stat_memories_allocated);
+	return kfree(ptr);
+}
+
+void internal_vunmap(void *ptr)
+{
+	struct tf_device *dev = tf_get_device();
+
+	if (ptr != NULL)
+		atomic_dec(
+			&dev->stats.stat_memories_allocated);
+
+	vunmap((void *) (((unsigned int)ptr) & 0xFFFFF000));
+}
+
+void *internal_vmalloc(size_t size)
+{
+	void *ptr;
+	struct tf_device *dev = tf_get_device();
+
+	ptr = vmalloc(size);
+
+	if (ptr != NULL)
+		atomic_inc(
+			&dev->stats.stat_memories_allocated);
+
+	return ptr;
+}
+
+void internal_vfree(void *ptr)
+{
+	struct tf_device *dev = tf_get_device();
+
+	if (ptr != NULL)
+		atomic_dec(
+			&dev->stats.stat_memories_allocated);
+	return vfree(ptr);
+}
+
+unsigned long internal_get_zeroed_page(int priority)
+{
+	unsigned long result;
+	struct tf_device *dev = tf_get_device();
+
+	result = get_zeroed_page(priority);
+
+	if (result != 0)
+		atomic_inc(&dev->stats.
+ stat_pages_allocated); + + return result; +} + +void internal_free_page(unsigned long addr) +{ + struct tf_device *dev = tf_get_device(); + + if (addr != 0) + atomic_dec( + &dev->stats.stat_pages_allocated); + return free_page(addr); +} + +int internal_get_user_pages( + struct task_struct *tsk, + struct mm_struct *mm, + unsigned long start, + int len, + int write, + int force, + struct page **pages, + struct vm_area_struct **vmas) +{ + int result; + struct tf_device *dev = tf_get_device(); + + result = get_user_pages( + tsk, + mm, + start, + len, + write, + force, + pages, + vmas); + + if (result > 0) + atomic_add(result, + &dev->stats.stat_pages_locked); + + return result; +} + +void internal_get_page(struct page *page) +{ + struct tf_device *dev = tf_get_device(); + + atomic_inc(&dev->stats.stat_pages_locked); + + get_page(page); +} + +void internal_page_cache_release(struct page *page) +{ + struct tf_device *dev = tf_get_device(); + + atomic_dec(&dev->stats.stat_pages_locked); + + page_cache_release(page); +} diff --git a/security/smc/omap4/scxlnx_util.h b/security/smc/tf_util.h index 4569ec2..43a05da 100644 --- a/security/smc/omap4/scxlnx_util.h +++ b/security/smc/tf_util.h @@ -1,5 +1,5 @@ -/* - * Copyright (c) 2006-2010 Trusted Logic S.A. +/** + * Copyright (c) 2011 Trusted Logic S.A. * All Rights Reserved. 
* * This program is free software; you can redistribute it and/or @@ -16,8 +16,9 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, * MA 02111-1307 USA */ -#ifndef __SCXLNX_UTIL_H__ -#define __SCXLNX_UTIL_H__ + +#ifndef __TF_UTIL_H__ +#define __TF_UTIL_H__ #include <linux/spinlock.h> #include <linux/errno.h> @@ -30,8 +31,8 @@ #include <linux/vmalloc.h> #include <asm/byteorder.h> -#include "scx_protocol.h" -#include "scxlnx_defs.h" +#include "tf_protocol.h" +#include "tf_defs.h" /*---------------------------------------------------------------------------- * Debug printing routines @@ -39,29 +40,29 @@ #ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT -void addressCacheProperty(unsigned long va); +void address_cache_property(unsigned long va); #define dprintk printk -void SCXLNXDumpL1SharedBuffer(struct SCHANNEL_C1S_BUFFER *pBuf); +void tf_dump_l1_shared_buffer(struct tf_l1_shared_buffer *buffer); -void SCXLNXDumpMessage(union SCX_COMMAND_MESSAGE *pMessage); +void tf_dump_command(union tf_command *command); -void SCXLNXDumpAnswer(union SCX_ANSWER_MESSAGE *pAnswer); +void tf_dump_answer(union tf_answer *answer); -#ifdef CONFIG_SMC_BENCH_SECURE_CYCLE -void setupCounters(void); -void runBogoMIPS(void); -int runCodeSpeed(unsigned int nLoop); -int runDataSpeed(unsigned int nLoop, unsigned long nVA); -#endif /* CONFIG_SMC_BENCH_SECURE_CYCLE */ +#ifdef CONFIG_BENCH_SECURE_CYCLE +void setup_counters(void); +void run_bogo_mips(void); +int run_code_speed(unsigned int loop); +int run_data_speed(unsigned int loop, unsigned long va); +#endif /* CONFIG_BENCH_SECURE_CYCLE */ #else /* defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */ #define dprintk(args...) 
do { ; } while (0) -#define SCXLNXDumpL1SharedBuffer(pBuf) ((void) 0) -#define SCXLNXDumpMessage(pMessage) ((void) 0) -#define SCXLNXDumpAnswer(pAnswer) ((void) 0) +#define tf_dump_l1_shared_buffer(buffer) ((void) 0) +#define tf_dump_command(command) ((void) 0) +#define tf_dump_answer(answer) ((void) 0) #endif /* defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */ @@ -71,22 +72,23 @@ int runDataSpeed(unsigned int nLoop, unsigned long nVA); * Process identification *----------------------------------------------------------------------------*/ -int SCXLNXConnGetCurrentProcessHash(void *pHash); +int tf_get_current_process_hash(void *hash); -int SCXLNXConnHashApplicationPathAndData(char *pBuffer, void *pData, - u32 nDataLen); +#ifndef CONFIG_ANDROID +int tf_hash_application_path_and_data(char *buffer, void *data, u32 data_len); +#endif /* !CONFIG_ANDROID */ /*---------------------------------------------------------------------------- * Statistic computation *----------------------------------------------------------------------------*/ -void *internal_kmalloc(size_t nSize, int nPriority); -void internal_kfree(void *pMemory); -void internal_vunmap(void *pMemory); -void *internal_vmalloc(size_t nSize); -void internal_vfree(void *pMemory); -unsigned long internal_get_zeroed_page(int nPriority); -void internal_free_page(unsigned long pPage); +void *internal_kmalloc(size_t size, int priority); +void internal_kfree(void *ptr); +void internal_vunmap(void *ptr); +void *internal_vmalloc(size_t size); +void internal_vfree(void *ptr); +unsigned long internal_get_zeroed_page(int priority); +void internal_free_page(unsigned long addr); int internal_get_user_pages( struct task_struct *tsk, struct mm_struct *mm, @@ -98,5 +100,4 @@ int internal_get_user_pages( struct vm_area_struct **vmas); void internal_get_page(struct page *page); void internal_page_cache_release(struct page *page); -#endif /* __SCXLNX_UTIL_H__ */ - +#endif /* __TF_UTIL_H__ */ diff --git 
a/security/smc/omap4/scxlnx_mshield.h b/security/smc/tf_zebra.h index 9457ca9..b30fe6f 100644 --- a/security/smc/omap4/scxlnx_mshield.h +++ b/security/smc/tf_zebra.h @@ -1,5 +1,5 @@ /** - * Copyright (c) 2010 Trusted Logic S.A. + * Copyright (c) 2011 Trusted Logic S.A. * All Rights Reserved. * * This program is free software; you can redistribute it and/or @@ -17,17 +17,17 @@ * MA 02111-1307 USA */ -#ifndef __SCXLNX_MSHIELD_H__ -#define __SCXLNX_MSHIELD_H__ +#ifndef __TF_ZEBRA_H__ +#define __TF_ZEBRA_H__ -#include "scxlnx_defs.h" +#include "tf_defs.h" -int SCXLNXCtrlDeviceRegister(void); +int tf_ctrl_device_register(void); -int SCXLNXCommStart(struct SCXLNX_COMM *pComm, - u32 nWorkspaceAddr, u32 nWorkspaceSize, - u8 *pPABufferVAddr, u32 nPABufferSize, - u8 *pPropertiesBuffer, u32 nPropertiesBufferLength); +int tf_start(struct tf_comm *comm, + u32 workspace_addr, u32 workspace_size, + u8 *pa_buffer, u32 pa_size, + u8 *properties_buffer, u32 properties_length); /* Assembler entry points to/from secure */ u32 schedule_secure_world(u32 app_id, u32 proc_id, u32 flags, u32 args); @@ -35,10 +35,10 @@ u32 rpc_handler(u32 p1, u32 p2, u32 p3, u32 p4); u32 read_mpidr(void); /* L4 SEC clockdomain enabling/disabling */ -void tf_l4sec_clkdm_wakeup(bool use_spin_lock, bool wakelock); -void tf_l4sec_clkdm_allow_idle(bool use_spin_lock, bool wakeunlock); +void tf_l4sec_clkdm_wakeup(bool wakelock); +void tf_l4sec_clkdm_allow_idle(bool wakeunlock); /* Delayed secure resume */ int tf_delayed_secure_resume(void); -#endif +#endif /* __TF_ZEBRA_H__ */ |