author     Trusted Logic <smc_support@trusted-logic.com>  2011-09-30 20:44:11 -0700
committer  Dan Murphy <dmurphy@ti.com>                    2011-10-12 08:06:26 -0500
commit     4048a9f4ba16bf5ba061ddb8b5b66cd8d8d46e26 (patch)
tree       6a5cf085834eb3240f25529faff4c684543b66b2 /security
parent     7e7dae9eaf1fa2a244db52933219ecce7d028cdd (diff)
OMAP4: SMC: 01.04 P6 Release
This patch ports the SMC 01.04 P6 release to K3.0.

Change-Id: Ia95fffd927b353eca3caf58bc4097eed97544705
Signed-off-by: Gonzalo Alexandre <alexandre.gonzalo@trusted-logic.com>
Signed-off-by: Florian Sylvestre <florian.sylvestre@trusted-logic.com>
Signed-off-by: Praneeth Bajjuri <praneeth@ti.com>
Signed-off-by: Trusted Logic <smc_support@trusted-logic.com>
Signed-off-by: Bryan Buckley <bryan.buckley@ti.com>
Diffstat (limited to 'security')
-rw-r--r--  security/Kconfig                    1
-rw-r--r--  security/Makefile                   1
-rw-r--r--  security/smc/Kconfig               45
-rw-r--r--  security/smc/Makefile              41
-rw-r--r--  security/smc/bridge_pub2sec.S     242
-rw-r--r--  security/smc/s_version.h           92
-rw-r--r--  security/smc/tf_comm.c           1748
-rw-r--r--  security/smc/tf_comm.h            204
-rw-r--r--  security/smc/tf_comm_mshield.c   1011
-rw-r--r--  security/smc/tf_conn.c           1567
-rw-r--r--  security/smc/tf_conn.h             87
-rw-r--r--  security/smc/tf_crypto.c         1278
-rw-r--r--  security/smc/tf_crypto.h          349
-rw-r--r--  security/smc/tf_crypto_aes.c     1380
-rw-r--r--  security/smc/tf_crypto_des.c      404
-rw-r--r--  security/smc/tf_crypto_digest.c   992
-rw-r--r--  security/smc/tf_defs.h            544
-rw-r--r--  security/smc/tf_device.c          652
-rw-r--r--  security/smc/tf_device_mshield.c  351
-rw-r--r--  security/smc/tf_dma.c             106
-rw-r--r--  security/smc/tf_dma.h              64
-rw-r--r--  security/smc/tf_protocol.h        669
-rw-r--r--  security/smc/tf_util.c           1145
-rw-r--r--  security/smc/tf_util.h            103
-rw-r--r--  security/smc/tf_zebra.h            44
25 files changed, 13120 insertions, 0 deletions
diff --git a/security/Kconfig b/security/Kconfig
index e0f08b5..f76afcc 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -187,6 +187,7 @@ source security/tomoyo/Kconfig
source security/apparmor/Kconfig
source security/integrity/ima/Kconfig
+source security/smc/Kconfig
choice
prompt "Default security module"
diff --git a/security/Makefile b/security/Makefile
index 8bb0fe9..968c101 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -26,3 +26,4 @@ obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o
# Object integrity file lists
subdir-$(CONFIG_IMA) += integrity/ima
obj-$(CONFIG_IMA) += integrity/ima/built-in.o
+obj-$(CONFIG_SECURITY_MIDDLEWARE_COMPONENT) += smc/
diff --git a/security/smc/Kconfig b/security/smc/Kconfig
new file mode 100644
index 0000000..315912a
--- /dev/null
+++ b/security/smc/Kconfig
@@ -0,0 +1,45 @@
+config TF_ZEBRA
+ bool
+
+config SECURITY_MIDDLEWARE_COMPONENT
+ bool "Enable SMC Driver"
+ depends on ARCH_OMAP3 || ARCH_OMAP4
+ default n
+ select TF_ZEBRA
+ help
+ This option adds kernel support for communication with the SMC
+ Protected Application.
+
+ If you are unsure how to answer this question, answer N.
+
+config SMC_KERNEL_CRYPTO
+	bool "Register SMC into kernel crypto subsystem"
+ depends on SECURITY_MIDDLEWARE_COMPONENT
+ default n
+ help
+	  This option enables the kernel crypto subsystem to use the SMC
+	  and the OMAP hardware accelerators.
+
+ If you are unsure how to answer this question, answer Y.
+
+config SECURE_TRACE
+ bool "Enable SMC secure traces"
+ depends on SECURITY_MIDDLEWARE_COMPONENT && ARCH_OMAP4
+ default y
+ help
+ This option enables traces from the SMC Protected Application to be
+ displayed in kernel logs.
+
+config TF_DRIVER_DEBUG_SUPPORT
+ bool "Debug support"
+ depends on SECURITY_MIDDLEWARE_COMPONENT
+ default n
+ help
+	  This option enables debug traces in the driver.
+
+config SMC_BENCH_SECURE_CYCLE
+ bool "Enable secure cycles benchmarks"
+ depends on TF_DRIVER_DEBUG_SUPPORT && ARCH_OMAP4
+ default n
+ help
+	  This option enables benchmarks.
diff --git a/security/smc/Makefile b/security/smc/Makefile
new file mode 100644
index 0000000..af345a1
--- /dev/null
+++ b/security/smc/Makefile
@@ -0,0 +1,41 @@
+#
+# Copyright (c) 2006-2010 Trusted Logic S.A.
+# All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2 of
+# the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+# MA 02111-1307 USA
+#
+
+ifdef S_VERSION_BUILD
+EXTRA_CFLAGS += -DS_VERSION_BUILD=$(S_VERSION_BUILD)
+endif
+
+EXTRA_CFLAGS += -Iarch/arm/mach-omap2
+EXTRA_CFLAGS += -Iarch/arm/plat-omap/include/plat
+
+tf_driver-objs += tf_util.o
+tf_driver-objs += tf_conn.o
+tf_driver-objs += tf_device.o
+tf_driver-objs += tf_comm.o
+tf_driver-objs += tf_crypto.o
+tf_driver-objs += tf_crypto_digest.o
+tf_driver-objs += tf_crypto_aes.o
+tf_driver-objs += tf_crypto_des.o
+tf_driver-objs += tf_dma.o
+tf_driver-objs += tf_comm_mshield.o
+tf_driver-objs += tf_device_mshield.o
+tf_driver-objs += bridge_pub2sec.o
+
+obj-$(CONFIG_SECURITY_MIDDLEWARE_COMPONENT) += tf_driver.o
diff --git a/security/smc/bridge_pub2sec.S b/security/smc/bridge_pub2sec.S
new file mode 100644
index 0000000..15cd3b7
--- /dev/null
+++ b/security/smc/bridge_pub2sec.S
@@ -0,0 +1,242 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+.text
+
+#define SMICODEPUB_IRQ_END 0xFE
+#define SMICODEPUB_FIQ_END 0xFD
+#define SMICODEPUB_RPC_END 0xFC
+
+#define PUB2SEC_NOCST 0xFF
+#define SMICODEPUB_NEWTASK 0x00
+
+/*
+ * RPC status:
+ * - 0: the secure world yielded due to an interrupt
+ * - 1: the secure world yielded on an RPC (no public thread is handling it)
+ * - 2: the secure world yielded on an RPC and the response is ready
+ */
+#define RPC_ADVANCEMENT_NONE 0
+#define RPC_ADVANCEMENT_PENDING 1
+#define RPC_ADVANCEMENT_FINISHED 2
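+
+/*
+ * Illustrative flow, inferred from this file rather than a normative
+ * description: rpc_handler below posts RPC_ADVANCEMENT_PENDING and returns
+ * to the public world; the public world is expected to execute the RPC,
+ * set RPC_ADVANCEMENT_FINISHED, and re-enter through schedule_secure_world
+ * with r0 = SMICODEPUB_RPC_END, which reloads r0-r3 from g_RPC_parameters
+ * for the Secure World.
+ */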
+
+#ifdef CONFIG_ARM_ERRATA_430973
+#define INVALIDATE_BTB MCR p15, 0, R0, c7, c5, 6
+#else
+#define INVALIDATE_BTB
+#endif
+
+schedule_secure_world:
+ .global schedule_secure_world
+
+ /* Save registers */
+ push {r4-r12, lr}
+
+ /* Copy the Secure Service ID in r12 */
+ mov r12, r0
+
+ cmp r0, #SMICODEPUB_IRQ_END
+ beq return_from_irq
+
+ cmp r0, #SMICODEPUB_RPC_END
+ beq return_from_rpc
+
+ mov r6, #PUB2SEC_NOCST
+ mov r12, #SMICODEPUB_NEWTASK
+
+ b label_smc
+
+return_from_rpc:
+ ldr r9, =g_RPC_parameters
+ ldm r9, {r0-r3}
+ /* fall through */
+
+return_from_irq:
+ ldr r10, =g_secure_task_id
+ ldr r6, [r10]
+
+ b label_smc
+
+label_smc:
+ INVALIDATE_BTB
+ dsb
+ dmb
+
+#ifdef CONFIG_BENCH_SECURE_CYCLE
+	/* Come from Non Secure: activate counter 1 (writes of 0 are ignored) */
+	mov r4, #0x00000002
+
+	/* Write Count Enable Set Register */
+	mcr p15, 0x0, r4, c9, c12, 1
+
+	/* Come from Non Secure: stop counter 0 (writes of 0 are ignored) */
+ mov r4, #0x00000001
+
+ /* Write Count Enable Clear Register */
+ mcr p15, 0x0, r4, c9, c12, 2
+#endif
+
+ smc #0
+ b service_end
+ nop
+
+#ifdef CONFIG_BENCH_SECURE_CYCLE
+	/* Come from Secure: activate counter 0 (writes of 0 are ignored) */
+	mov r4, #0x00000001
+
+	/* Write Count Enable Set Register */
+	mcr p15, 0x0, r4, c9, c12, 1
+
+	/* Come from Secure: stop counter 1 (writes of 0 are ignored) */
+ mov r4, #0x00000002
+
+ /* Write Count Enable Clear Register */
+ mcr p15, 0x0, r4, c9, c12, 2
+#endif
+
+ INVALIDATE_BTB
+ ldr r8, =g_secure_task_id
+ str r6, [r8]
+
+ mov r0, #0x00
+ ldr r8, =g_service_end
+ str r0, [r8]
+
+ b schedule_secure_world_exit
+
+service_end:
+
+schedule_secure_world_exit:
+#ifdef CONFIG_BENCH_SECURE_CYCLE
+	/* Come from Secure: activate counter 0 (writes of 0 are ignored) */
+	mov r4, #0x00000001
+
+	/* Write Count Enable Set Register */
+	mcr p15, 0x0, r4, c9, c12, 1
+
+	/* Come from Secure: stop counter 1 (writes of 0 are ignored) */
+ mov r4, #0x00000002
+
+ /* Write Count Enable Clear Register */
+ mcr p15, 0x0, r4, c9, c12, 2
+#endif
+
+ INVALIDATE_BTB
+
+ /* Restore registers */
+ pop {r4-r12, pc}
+
+rpc_handler:
+ .global rpc_handler
+
+#ifdef CONFIG_BENCH_SECURE_CYCLE
+	/* Come from Secure: activate counter 0 (writes of 0 are ignored) */
+	mov r4, #0x00000001
+
+	/* Write Count Enable Set Register */
+	mcr p15, 0x0, r4, c9, c12, 1
+
+	/* Come from Secure: stop counter 1 (writes of 0 are ignored) */
+ mov r4, #0x00000002
+
+ /* Write Count Enable Clear Register */
+ mcr p15, 0x0, r4, c9, c12, 2
+#endif
+ INVALIDATE_BTB
+
+ /* g_RPC_advancement = RPC_ADVANCEMENT_PENDING */
+ ldr r8, =g_RPC_advancement
+ mov r9, #RPC_ADVANCEMENT_PENDING
+ str r9, [r8]
+
+ ldr r8, =g_RPC_parameters
+ stm r8, {r0-r3}
+
+ ldr r8, =g_secure_task_id
+ str r6, [r8]
+
+ mov r0, #0x00
+ ldr r8, =g_service_end
+ str r0, [r8]
+
+ /* Restore registers */
+ pop {r4-r12, pc}
+
+#ifdef CONFIG_BENCH_SECURE_CYCLE
+
+setup_counters:
+ .global setup_counters
+
+ push {r14}
+
+ mrc p15, 0, r2, c9, c12, 0
+ orr r2, r2, #0x3
+ mcr p15, 0, r2, c9, c12, 0
+
+ mrc p15, 0, r2, c9, c12, 1
+ orr r2, r2, #0x80000000
+ mcr p15, 0, r2, c9, c12, 1
+
+ pop {pc}
+
+run_code_speed:
+ .global run_code_speed
+
+ push {r14}
+
+ /* Reset cycle counter */
+ mov r2, #0
+ mcr p15, 0, r2, c9, c13, 0
+
+run_code_speed_loop:
+ sub r0, r0, #1
+ cmp r0, #0
+ bne run_code_speed_loop
+
+ /* Read cycle counter */
+ mrc p15, 0, r0, c9, c13, 0
+
+ pop {pc}
+
+run_data_speed:
+ .global run_data_speed
+
+ push {r14}
+
+ /* Reset cycle counter */
+ mov r2, #0
+ mcr p15, 0, r2, c9, c13, 0
+
+run_data_speed_loop:
+ sub r0, r0, #1
+ ldr r2, [r1]
+ cmp r0, #0
+ bne run_data_speed_loop
+
+ /* read cycle counter */
+ mrc p15, 0, r0, c9, c13, 0
+
+ pop {pc}
+
+#endif
+
+read_mpidr:
+ .global read_mpidr
+ mrc p15, 0, r0, c0, c0, 5
+ bx lr
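The entry points above follow the AAPCS, with the argument passed in r0 and
the result returned in r0. Note also that the benchmark sections are guarded
by CONFIG_BENCH_SECURE_CYCLE, while the Kconfig option added by this patch is
SMC_BENCH_SECURE_CYCLE (i.e. CONFIG_SMC_BENCH_SECURE_CYCLE), so those paths
compile out unless the former symbol is defined by other means. Hypothetical
C-side declarations inferred from the register usage (a sketch, not the
driver's actual header):

    #include <linux/types.h>

    u32 schedule_secure_world(u32 service_id);  /* r0 in/out; return assumed */
    u32 read_mpidr(void);                       /* returns the CP15 MPIDR */
    #ifdef CONFIG_BENCH_SECURE_CYCLE
    void setup_counters(void);                  /* enables the PMU counters */
    u32 run_code_speed(u32 loop_count);         /* returns elapsed cycles */
    u32 run_data_speed(u32 loop_count, unsigned long addr); /* cycles */
    #endif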
diff --git a/security/smc/s_version.h b/security/smc/s_version.h
new file mode 100644
index 0000000..a16d548
--- /dev/null
+++ b/security/smc/s_version.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __S_VERSION_H__
+#define __S_VERSION_H__
+
+/*
+ * Usage: define S_VERSION_BUILD on the compiler's command line.
+ *
+ * Then set:
+ * - S_VERSION_OS
+ * - S_VERSION_PLATFORM
+ * - S_VERSION_MAIN
+ * - S_VERSION_ENG is optional
+ * - S_VERSION_PATCH is optional
+ * - S_VERSION_BUILD = 0 if S_VERSION_BUILD not defined or empty
+ */
+
+#define S_VERSION_OS "A" /* "A" for all Android */
+#define S_VERSION_PLATFORM "G" /* "G" for 4430 */
+
+/*
+ * This version number must be updated for each new release
+ */
+#define S_VERSION_MAIN "01.04"
+
+/*
+ * If this is a patch or engineering version, use the following
+ * defines to set the version number. Else set these values to 0.
+ */
+#define S_VERSION_PATCH 6
+#define S_VERSION_ENG 0
+
+#ifdef S_VERSION_BUILD
+/* TRICK: detect if S_VERSION_BUILD is defined but empty */
+#if 0 == S_VERSION_BUILD-0
+#undef S_VERSION_BUILD
+#define S_VERSION_BUILD 0
+#endif
+#else
+/* S_VERSION_BUILD is not defined */
+#define S_VERSION_BUILD 0
+#endif
+
+#define __STRINGIFY(X) #X
+#define __STRINGIFY2(X) __STRINGIFY(X)
+
+#if S_VERSION_ENG != 0
+#define _S_VERSION_ENG "e" __STRINGIFY2(S_VERSION_ENG)
+#else
+#define _S_VERSION_ENG ""
+#endif
+
+#if S_VERSION_PATCH != 0
+#define _S_VERSION_PATCH "p" __STRINGIFY2(S_VERSION_PATCH)
+#else
+#define _S_VERSION_PATCH ""
+#endif
+
+#if !defined(NDEBUG) || defined(_DEBUG)
+#define S_VERSION_VARIANT "D "
+#else
+#define S_VERSION_VARIANT " "
+#endif
+
+#define S_VERSION_STRING \
+ "SMC" \
+ S_VERSION_OS \
+ S_VERSION_PLATFORM \
+ S_VERSION_MAIN \
+ _S_VERSION_PATCH \
+ _S_VERSION_ENG \
+ "." __STRINGIFY2(S_VERSION_BUILD) " " \
+ S_VERSION_VARIANT
+
+#endif /* __S_VERSION_H__ */
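For reference, with the values in this release (S_VERSION_MAIN "01.04",
S_VERSION_PATCH 6, S_VERSION_ENG 0), _S_VERSION_PATCH expands to "p6",
_S_VERSION_ENG to "", and the default build number is 0, so S_VERSION_STRING
concatenates to "SMCAG01.04p6.0" plus the variant marker. A minimal host-side
check of the expansion (hypothetical, not part of the patch):

    #include <stdio.h>
    #include "s_version.h"

    int main(void)
    {
            /* With the release values above and S_VERSION_BUILD left
             * undefined, this prints "SMCAG01.04p6.0 D " when NDEBUG is
             * not defined and "SMCAG01.04p6.0  " otherwise. */
            printf("%s\n", S_VERSION_STRING);
            return 0;
    }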
diff --git a/security/smc/tf_comm.c b/security/smc/tf_comm.c
new file mode 100644
index 0000000..ed05262
--- /dev/null
+++ b/security/smc/tf_comm.c
@@ -0,0 +1,1748 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <asm/div64.h>
+#include <asm/system.h>
+#include <linux/version.h>
+#include <asm/cputype.h>
+#include <linux/interrupt.h>
+#include <linux/page-flags.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+#include <linux/jiffies.h>
+#include <linux/freezer.h>
+
+#include "tf_defs.h"
+#include "tf_comm.h"
+#include "tf_protocol.h"
+#include "tf_util.h"
+#include "tf_conn.h"
+
+#ifdef CONFIG_TF_ZEBRA
+#include "tf_zebra.h"
+#endif
+
+/*---------------------------------------------------------------------------
+ * Internal Constants
+ *---------------------------------------------------------------------------*/
+
+/*
+ * Shared memory descriptor constants
+ */
+#define DESCRIPTOR_B_MASK (1 << 2)
+#define DESCRIPTOR_C_MASK (1 << 3)
+#define DESCRIPTOR_S_MASK (1 << 10)
+
+#define L1_COARSE_DESCRIPTOR_BASE (0x00000001)
+#define L1_COARSE_DESCRIPTOR_ADDR_MASK (0xFFFFFC00)
+#define L1_COARSE_DESCRIPTOR_V13_12_SHIFT (5)
+
+#define L2_PAGE_DESCRIPTOR_BASE (0x00000003)
+#define L2_PAGE_DESCRIPTOR_AP_APX_READ (0x220)
+#define L2_PAGE_DESCRIPTOR_AP_APX_READ_WRITE (0x30)
+
+#define L2_INIT_DESCRIPTOR_BASE (0x00000003)
+#define L2_INIT_DESCRIPTOR_V13_12_SHIFT (4)
+
+/*
+ * Reject an attempt to share a strongly-Ordered or Device memory
+ * Strongly-Ordered: TEX=0b000, C=0, B=0
+ * Shared Device: TEX=0b000, C=0, B=1
+ * Non-Shared Device: TEX=0b010, C=0, B=0
+ */
+#define L2_TEX_C_B_MASK \
+ ((1<<8) | (1<<7) | (1<<6) | (1<<3) | (1<<2))
+#define L2_TEX_C_B_STRONGLY_ORDERED \
+ ((0<<8) | (0<<7) | (0<<6) | (0<<3) | (0<<2))
+#define L2_TEX_C_B_SHARED_DEVICE \
+ ((0<<8) | (0<<7) | (0<<6) | (0<<3) | (1<<2))
+#define L2_TEX_C_B_NON_SHARED_DEVICE \
+ ((0<<8) | (1<<7) | (0<<6) | (0<<3) | (0<<2))
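+
+/*
+ * For reference, the masks above evaluate to L2_TEX_C_B_MASK = 0x1CC,
+ * L2_TEX_C_B_STRONGLY_ORDERED = 0x000, L2_TEX_C_B_SHARED_DEVICE = 0x004
+ * and L2_TEX_C_B_NON_SHARED_DEVICE = 0x080.
+ */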
+
+#define CACHE_S(x) ((x) & (1 << 24))
+#define CACHE_DSIZE(x) (((x) >> 12) & 4095)
+
+#define TIME_IMMEDIATE ((u64) 0x0000000000000000ULL)
+#define TIME_INFINITE ((u64) 0xFFFFFFFFFFFFFFFFULL)
+
+/*---------------------------------------------------------------------------
+ * atomic operation definitions
+ *---------------------------------------------------------------------------*/
+
+/*
+ * Atomically updates the sync_serial_n and time_n registers;
+ * sync_serial_n and time_n modifications are thread safe.
+ */
+void tf_set_current_time(struct tf_comm *comm)
+{
+ u32 new_sync_serial;
+ struct timeval now;
+ u64 time64;
+
+ /*
+ * lock the structure while updating the L1 shared memory fields
+ */
+ spin_lock(&comm->lock);
+
+ /* read sync_serial_n and change the TimeSlot bit field */
+ new_sync_serial =
+ tf_read_reg32(&comm->pBuffer->sync_serial_n) + 1;
+
+ do_gettimeofday(&now);
+ time64 = now.tv_sec;
+ time64 = (time64 * 1000) + (now.tv_usec / 1000);
+
+ /* Write the new time64 and nSyncSerial into shared memory */
+ tf_write_reg64(&comm->pBuffer->time_n[new_sync_serial &
+ TF_SYNC_SERIAL_TIMESLOT_N], time64);
+ tf_write_reg32(&comm->pBuffer->sync_serial_n,
+ new_sync_serial);
+
+ spin_unlock(&comm->lock);
+}
+
+/*
+ * Performs the specific read timeout operation.
+ * The difficulty here is to atomically read two u32 values
+ * from the L1 shared buffer. Consistency is guaranteed by
+ * reading the timeslot given by the Secure World before and
+ * after the operation.
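+ *
+ * (This is the classic sequence-lock read pattern: the Secure World
+ * bumps sync_serial_s whenever it publishes a new timeout_s slot, so
+ * a read is consistent only if sync_serial_s is unchanged across it.)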
+ */
+static inline void tf_read_timeout(struct tf_comm *comm, u64 *time)
+{
+ u32 sync_serial_s_initial = 0;
+ u32 sync_serial_s_final = 1;
+ u64 time64;
+
+ spin_lock(&comm->lock);
+
+ while (sync_serial_s_initial != sync_serial_s_final) {
+ sync_serial_s_initial = tf_read_reg32(
+ &comm->pBuffer->sync_serial_s);
+ time64 = tf_read_reg64(
+ &comm->pBuffer->timeout_s[sync_serial_s_initial&1]);
+
+ sync_serial_s_final = tf_read_reg32(
+ &comm->pBuffer->sync_serial_s);
+ }
+
+ spin_unlock(&comm->lock);
+
+ *time = time64;
+}
+
+/*----------------------------------------------------------------------------
+ * SIGKILL signal handling
+ *----------------------------------------------------------------------------*/
+
+static bool sigkill_pending(void)
+{
+ if (signal_pending(current)) {
+ dprintk(KERN_INFO "A signal is pending\n");
+ if (sigismember(&current->pending.signal, SIGKILL)) {
+ dprintk(KERN_INFO "A SIGKILL is pending\n");
+ return true;
+ } else if (sigismember(
+ &current->signal->shared_pending.signal, SIGKILL)) {
+ dprintk(KERN_INFO "A SIGKILL is pending (shared)\n");
+ return true;
+ }
+ }
+ return false;
+}
+
+/*----------------------------------------------------------------------------
+ * Shared memory related operations
+ *----------------------------------------------------------------------------*/
+
+struct tf_coarse_page_table *tf_alloc_coarse_page_table(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ u32 type)
+{
+ struct tf_coarse_page_table *coarse_pg_table = NULL;
+
+ spin_lock(&(alloc_context->lock));
+
+ if (!(list_empty(&(alloc_context->free_coarse_page_tables)))) {
+ /*
+ * The free list can provide us a coarse page table
+ * descriptor
+ */
+ coarse_pg_table = list_first_entry(
+ &alloc_context->free_coarse_page_tables,
+ struct tf_coarse_page_table, list);
+ list_del(&(coarse_pg_table->list));
+
+ coarse_pg_table->parent->ref_count++;
+ } else {
+ /* no array of coarse page tables, create a new one */
+ struct tf_coarse_page_table_array *array;
+ void *page;
+ int i;
+
+ spin_unlock(&(alloc_context->lock));
+
+ /* first allocate a new page descriptor */
+ array = internal_kmalloc(sizeof(*array), GFP_KERNEL);
+ if (array == NULL) {
+ dprintk(KERN_ERR "tf_alloc_coarse_page_table(%p):"
+ " failed to allocate a table array\n",
+ alloc_context);
+ return NULL;
+ }
+
+ array->type = type;
+ INIT_LIST_HEAD(&(array->list));
+
+ /* now allocate the actual page the page descriptor describes */
+ page = (void *) internal_get_zeroed_page(GFP_KERNEL);
+ if (page == NULL) {
+ dprintk(KERN_ERR "tf_alloc_coarse_page_table(%p):"
+				" failed to allocate a page\n",
+ alloc_context);
+ internal_kfree(array);
+ return NULL;
+ }
+
+ spin_lock(&(alloc_context->lock));
+
+ /* initialize the coarse page table descriptors */
+ for (i = 0; i < 4; i++) {
+ INIT_LIST_HEAD(&(array->coarse_page_tables[i].list));
+ array->coarse_page_tables[i].descriptors =
+ page + (i * SIZE_1KB);
+ array->coarse_page_tables[i].parent = array;
+
+ if (i == 0) {
+ /*
+ * the first element is kept for the current
+ * coarse page table allocation
+ */
+ coarse_pg_table =
+ &(array->coarse_page_tables[i]);
+ array->ref_count++;
+ } else {
+ /*
+ * The other elements are added to the free list
+ */
+ list_add(&(array->coarse_page_tables[i].list),
+ &(alloc_context->
+ free_coarse_page_tables));
+ }
+ }
+
+ list_add(&(array->list),
+ &(alloc_context->coarse_page_table_arrays));
+ }
+ spin_unlock(&(alloc_context->lock));
+
+ return coarse_pg_table;
+}
+
+
+void tf_free_coarse_page_table(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ struct tf_coarse_page_table *coarse_pg_table,
+ int force)
+{
+ struct tf_coarse_page_table_array *array;
+
+ spin_lock(&(alloc_context->lock));
+
+ array = coarse_pg_table->parent;
+
+ (array->ref_count)--;
+
+ if (array->ref_count == 0) {
+ /*
+ * no coarse page table descriptor is used
+ * check if we should free the whole page
+ */
+
+ if ((array->type == TF_PAGE_DESCRIPTOR_TYPE_PREALLOCATED)
+ && (force == 0))
+ /*
+ * This is a preallocated page,
+ * add the page back to the free list
+ */
+ list_add(&(coarse_pg_table->list),
+ &(alloc_context->free_coarse_page_tables));
+ else {
+ /*
+ * None of the page's coarse page table descriptors
+ * are in use, free the whole page
+ */
+ int i;
+ u32 *descriptors;
+
+ /*
+ * remove the page's associated coarse page table
+ * descriptors from the free list
+ */
+ for (i = 0; i < 4; i++)
+ if (&(array->coarse_page_tables[i]) !=
+ coarse_pg_table)
+ list_del(&(array->
+ coarse_page_tables[i].list));
+
+ descriptors =
+ array->coarse_page_tables[0].descriptors;
+ array->coarse_page_tables[0].descriptors = NULL;
+
+ /* remove the coarse page table from the array */
+ list_del(&(array->list));
+
+ spin_unlock(&(alloc_context->lock));
+ /*
+ * Free the page.
+ * The address of the page is contained in the first
+ * element
+ */
+ internal_free_page((unsigned long) descriptors);
+			/* finally free the array */
+ internal_kfree(array);
+
+ spin_lock(&(alloc_context->lock));
+ }
+ } else {
+ /*
+ * Some coarse page table descriptors are in use.
+ * Add the descriptor to the free list
+ */
+ list_add(&(coarse_pg_table->list),
+ &(alloc_context->free_coarse_page_tables));
+ }
+
+ spin_unlock(&(alloc_context->lock));
+}
+
+
+void tf_init_coarse_page_table_allocator(
+ struct tf_coarse_page_table_allocation_context *alloc_context)
+{
+ spin_lock_init(&(alloc_context->lock));
+ INIT_LIST_HEAD(&(alloc_context->coarse_page_table_arrays));
+ INIT_LIST_HEAD(&(alloc_context->free_coarse_page_tables));
+}
+
+void tf_release_coarse_page_table_allocator(
+ struct tf_coarse_page_table_allocation_context *alloc_context)
+{
+ spin_lock(&(alloc_context->lock));
+
+ /* now clean up the list of page descriptors */
+ while (!list_empty(&(alloc_context->coarse_page_table_arrays))) {
+ struct tf_coarse_page_table_array *page_desc;
+ u32 *descriptors;
+
+ page_desc = list_first_entry(
+ &alloc_context->coarse_page_table_arrays,
+ struct tf_coarse_page_table_array, list);
+
+ descriptors = page_desc->coarse_page_tables[0].descriptors;
+ list_del(&(page_desc->list));
+
+ spin_unlock(&(alloc_context->lock));
+
+ if (descriptors != NULL)
+ internal_free_page((unsigned long)descriptors);
+
+ internal_kfree(page_desc);
+
+ spin_lock(&(alloc_context->lock));
+ }
+
+ spin_unlock(&(alloc_context->lock));
+}
+
+/*
+ * Returns the L1 coarse page descriptor for
+ * a coarse page table located at address coarse_pg_table_descriptors
+ */
+u32 tf_get_l1_coarse_descriptor(
+ u32 coarse_pg_table_descriptors[256])
+{
+ u32 descriptor = L1_COARSE_DESCRIPTOR_BASE;
+ unsigned int info = read_cpuid(CPUID_CACHETYPE);
+
+ descriptor |= (virt_to_phys((void *) coarse_pg_table_descriptors)
+ & L1_COARSE_DESCRIPTOR_ADDR_MASK);
+
+ if (CACHE_S(info) && (CACHE_DSIZE(info) & (1 << 11))) {
+ dprintk(KERN_DEBUG "tf_get_l1_coarse_descriptor "
+ "V31-12 added to descriptor\n");
+ /* the 16k alignment restriction applies */
+ descriptor |= (DESCRIPTOR_V13_12_GET(
+ (u32)coarse_pg_table_descriptors) <<
+ L1_COARSE_DESCRIPTOR_V13_12_SHIFT);
+ }
+
+ return descriptor;
+}
+
+
+#define dprintk_desc(...)
+/*
+ * Returns the L2 descriptor for the specified user page.
+ */
+u32 tf_get_l2_descriptor_common(u32 vaddr, struct mm_struct *mm)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *ptep;
+ u32 *hwpte;
+ u32 tex = 0;
+ u32 descriptor = 0;
+
+ dprintk_desc(KERN_INFO "VirtAddr = %x\n", vaddr);
+ pgd = pgd_offset(mm, vaddr);
+ dprintk_desc(KERN_INFO "pgd = %x, value=%x\n", (unsigned int) pgd,
+ (unsigned int) *pgd);
+ if (pgd_none(*pgd))
+ goto error;
+ pud = pud_offset(pgd, vaddr);
+ dprintk_desc(KERN_INFO "pud = %x, value=%x\n", (unsigned int) pud,
+ (unsigned int) *pud);
+ if (pud_none(*pud))
+ goto error;
+ pmd = pmd_offset(pud, vaddr);
+ dprintk_desc(KERN_INFO "pmd = %x, value=%x\n", (unsigned int) pmd,
+ (unsigned int) *pmd);
+ if (pmd_none(*pmd))
+ goto error;
+
+ if (PMD_TYPE_SECT&(*pmd)) {
+ /* We have a section */
+ dprintk_desc(KERN_INFO "Section descr=%x\n",
+ (unsigned int)*pmd);
+ if ((*pmd) & PMD_SECT_BUFFERABLE)
+ descriptor |= DESCRIPTOR_B_MASK;
+ if ((*pmd) & PMD_SECT_CACHEABLE)
+ descriptor |= DESCRIPTOR_C_MASK;
+ if ((*pmd) & PMD_SECT_S)
+ descriptor |= DESCRIPTOR_S_MASK;
+ tex = ((*pmd) >> 12) & 7;
+ } else {
+ /* We have a table */
+ ptep = pte_offset_map(pmd, vaddr);
+ if (pte_present(*ptep)) {
+ dprintk_desc(KERN_INFO "L2 descr=%x\n",
+ (unsigned int) *ptep);
+ if ((*ptep) & L_PTE_MT_BUFFERABLE)
+ descriptor |= DESCRIPTOR_B_MASK;
+ if ((*ptep) & L_PTE_MT_WRITETHROUGH)
+ descriptor |= DESCRIPTOR_C_MASK;
+ if ((*ptep) & L_PTE_MT_DEV_SHARED)
+ descriptor |= DESCRIPTOR_S_MASK;
+
+ /*
+			 * Linux's pte doesn't keep track of the TEX value.
+			 * We have to jump to the hardware pte; see
+			 * include/asm/pgtable.h
+ */
+ hwpte = (u32 *) (((u32) ptep) - 0x800);
+ if (((*hwpte) & L2_DESCRIPTOR_ADDR_MASK) !=
+ ((*ptep) & L2_DESCRIPTOR_ADDR_MASK))
+ goto error;
+ dprintk_desc(KERN_INFO "hw descr=%x\n", *hwpte);
+ tex = ((*hwpte) >> 6) & 7;
+ pte_unmap(ptep);
+ } else {
+ pte_unmap(ptep);
+ goto error;
+ }
+ }
+
+ descriptor |= (tex << 6);
+
+ return descriptor;
+
+error:
+	dprintk(KERN_ERR "Error occurred in %s\n", __func__);
+ return 0;
+}
+
+
+/*
+ * Changes an L2 page descriptor back to a pointer to a physical page
+ */
+inline struct page *tf_l2_page_descriptor_to_page(u32 l2_page_descriptor)
+{
+ return pte_page(l2_page_descriptor & L2_DESCRIPTOR_ADDR_MASK);
+}
+
+
+/*
+ * Converts, in place, the (struct page *) stored in *l2_page_descriptor
+ * into the corresponding L2 page descriptor, or sets it to
+ * L2_DESCRIPTOR_FAULT on failure.
+ */
+static void tf_get_l2_page_descriptor(
+ u32 *l2_page_descriptor,
+ u32 flags, struct mm_struct *mm)
+{
+ unsigned long page_vaddr;
+ u32 descriptor;
+ struct page *page;
+ bool unmap_page = false;
+
+ dprintk(KERN_INFO
+ "tf_get_l2_page_descriptor():"
+ "*l2_page_descriptor=%x\n",
+ *l2_page_descriptor);
+
+ if (*l2_page_descriptor == L2_DESCRIPTOR_FAULT)
+ return;
+
+ page = (struct page *) (*l2_page_descriptor);
+
+ page_vaddr = (unsigned long) page_address(page);
+ if (page_vaddr == 0) {
+ dprintk(KERN_INFO "page_address returned 0\n");
+		/* Should we use kmap_atomic(page, KM_USER0) instead? */
+ page_vaddr = (unsigned long) kmap(page);
+ if (page_vaddr == 0) {
+ *l2_page_descriptor = L2_DESCRIPTOR_FAULT;
+ dprintk(KERN_ERR "kmap returned 0\n");
+ return;
+ }
+ unmap_page = true;
+ }
+
+ descriptor = tf_get_l2_descriptor_common(page_vaddr, mm);
+ if (descriptor == 0) {
+ *l2_page_descriptor = L2_DESCRIPTOR_FAULT;
+ return;
+ }
+ descriptor |= L2_PAGE_DESCRIPTOR_BASE;
+
+ descriptor |= (page_to_phys(page) & L2_DESCRIPTOR_ADDR_MASK);
+
+ if (!(flags & TF_SHMEM_TYPE_WRITE))
+ /* only read access */
+ descriptor |= L2_PAGE_DESCRIPTOR_AP_APX_READ;
+ else
+ /* read and write access */
+ descriptor |= L2_PAGE_DESCRIPTOR_AP_APX_READ_WRITE;
+
+ if (unmap_page)
+ kunmap(page);
+
+ *l2_page_descriptor = descriptor;
+}
+
+
+/*
+ * Unlocks the physical memory pages and frees the coarse page
+ * tables that need to be freed
+ */
+void tf_cleanup_shared_memory(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ struct tf_shmem_desc *shmem_desc,
+ u32 full_cleanup)
+{
+ u32 coarse_page_index;
+
+ dprintk(KERN_INFO "tf_cleanup_shared_memory(%p)\n",
+ shmem_desc);
+
+#ifdef DEBUG_COARSE_TABLES
+ printk(KERN_DEBUG "tf_cleanup_shared_memory "
+ "- number of coarse page tables=%d\n",
+ shmem_desc->coarse_pg_table_count);
+
+ for (coarse_page_index = 0;
+ coarse_page_index < shmem_desc->coarse_pg_table_count;
+ coarse_page_index++) {
+ u32 j;
+
+ printk(KERN_DEBUG " Descriptor=%p address=%p index=%d\n",
+ shmem_desc->coarse_pg_table[coarse_page_index],
+ shmem_desc->coarse_pg_table[coarse_page_index]->
+ descriptors,
+ coarse_page_index);
+ if (shmem_desc->coarse_pg_table[coarse_page_index] != NULL) {
+ for (j = 0;
+ j < TF_DESCRIPTOR_TABLE_CAPACITY;
+ j += 8) {
+ int k;
+ printk(KERN_DEBUG " ");
+ for (k = j; k < j + 8; k++)
+					printk(KERN_DEBUG "0x%08X ",
+						shmem_desc->coarse_pg_table[
+							coarse_page_index]->
+							descriptors[k]);
+ printk(KERN_DEBUG "\n");
+ }
+ }
+ }
+ printk(KERN_DEBUG "tf_cleanup_shared_memory() - done\n\n");
+#endif
+
+ /* Parse the coarse page descriptors */
+ for (coarse_page_index = 0;
+ coarse_page_index < shmem_desc->coarse_pg_table_count;
+ coarse_page_index++) {
+ u32 j;
+ u32 found = 0;
+
+ /* parse the page descriptors of the coarse page */
+ for (j = 0; j < TF_DESCRIPTOR_TABLE_CAPACITY; j++) {
+ u32 l2_page_descriptor = (u32) (shmem_desc->
+ coarse_pg_table[coarse_page_index]->
+ descriptors[j]);
+
+ if (l2_page_descriptor != L2_DESCRIPTOR_FAULT) {
+ struct page *page =
+ tf_l2_page_descriptor_to_page(
+ l2_page_descriptor);
+
+ if (!PageReserved(page))
+ SetPageDirty(page);
+ internal_page_cache_release(page);
+
+ found = 1;
+ } else if (found == 1) {
+ break;
+ }
+ }
+
+ /*
+		 * Only free the coarse page tables that were not preallocated
+ */
+ if ((shmem_desc->type == TF_SHMEM_TYPE_REGISTERED_SHMEM) ||
+ (full_cleanup != 0))
+ tf_free_coarse_page_table(alloc_context,
+ shmem_desc->coarse_pg_table[coarse_page_index],
+ 0);
+ }
+
+ shmem_desc->coarse_pg_table_count = 0;
+ dprintk(KERN_INFO "tf_cleanup_shared_memory(%p) done\n",
+ shmem_desc);
+}
+
+/*
+ * Makes sure the coarse page tables are allocated (allocating them if
+ * needed), locks down the physical memory pages, and verifies the
+ * memory attributes according to flags.
+ */
+int tf_fill_descriptor_table(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ struct tf_shmem_desc *shmem_desc,
+ u32 buffer,
+ struct vm_area_struct **vmas,
+ u32 descriptors[TF_MAX_COARSE_PAGES],
+ u32 buffer_size,
+ u32 *buffer_start_offset,
+ bool in_user_space,
+ u32 flags,
+ u32 *descriptor_count)
+{
+ u32 coarse_page_index;
+ u32 coarse_page_count;
+ u32 page_count;
+ u32 page_shift = 0;
+ int error;
+ unsigned int info = read_cpuid(CPUID_CACHETYPE);
+
+ dprintk(KERN_INFO "tf_fill_descriptor_table"
+ "(%p, buffer=0x%08X, size=0x%08X, user=%01x "
+ "flags = 0x%08x)\n",
+ shmem_desc,
+ buffer,
+ buffer_size,
+ in_user_space,
+ flags);
+
+ /*
+ * Compute the number of pages
+ * Compute the number of coarse pages
+ * Compute the page offset
+ */
+ page_count = ((buffer & ~PAGE_MASK) +
+ buffer_size + ~PAGE_MASK) >> PAGE_SHIFT;
+
+ /* check whether the 16k alignment restriction applies */
+ if (CACHE_S(info) && (CACHE_DSIZE(info) & (1 << 11)))
+ /*
+ * The 16k alignment restriction applies.
+ * Shift data to get them 16k aligned
+ */
+ page_shift = DESCRIPTOR_V13_12_GET(buffer);
+ page_count += page_shift;
+
+
+ /*
+	 * Check that the number of pages fits in the coarse page tables
+ */
+ if (page_count > (TF_DESCRIPTOR_TABLE_CAPACITY *
+ TF_MAX_COARSE_PAGES)) {
+ dprintk(KERN_ERR "tf_fill_descriptor_table(%p): "
+ "%u pages required to map shared memory!\n",
+ shmem_desc, page_count);
+ error = -ENOMEM;
+ goto error;
+ }
+
+	/* each coarse page table describes 256 pages */
+ coarse_page_count = ((page_count +
+ TF_DESCRIPTOR_TABLE_CAPACITY_MASK) >>
+ TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT);
+
+ /*
+ * Compute the buffer offset
+ */
+ *buffer_start_offset = (buffer & ~PAGE_MASK) |
+ (page_shift << PAGE_SHIFT);
+
+ /* map each coarse page */
+ for (coarse_page_index = 0;
+ coarse_page_index < coarse_page_count;
+ coarse_page_index++) {
+ u32 j;
+ struct tf_coarse_page_table *coarse_pg_table;
+
+ /* compute a virtual address with appropriate offset */
+ u32 buffer_offset_vaddr = buffer +
+ (coarse_page_index * TF_MAX_COARSE_PAGE_MAPPED_SIZE);
+ u32 pages_to_get;
+
+ /*
+ * Compute the number of pages left for this coarse page.
+ * Decrement page_count each time
+ */
+ pages_to_get = (page_count >>
+ TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT) ?
+ TF_DESCRIPTOR_TABLE_CAPACITY : page_count;
+ page_count -= pages_to_get;
+
+ /*
+ * Check if the coarse page has already been allocated
+ * If not, do it now
+ */
+ if ((shmem_desc->type == TF_SHMEM_TYPE_REGISTERED_SHMEM)
+ || (shmem_desc->type ==
+ TF_SHMEM_TYPE_PM_HIBERNATE)) {
+ coarse_pg_table = tf_alloc_coarse_page_table(
+ alloc_context,
+ TF_PAGE_DESCRIPTOR_TYPE_NORMAL);
+
+ if (coarse_pg_table == NULL) {
+ dprintk(KERN_ERR
+ "tf_fill_descriptor_table(%p):"
+					" tf_alloc_coarse_page_table "
+ "failed for coarse page %d\n",
+ shmem_desc, coarse_page_index);
+ error = -ENOMEM;
+ goto error;
+ }
+
+ shmem_desc->coarse_pg_table[coarse_page_index] =
+ coarse_pg_table;
+ } else {
+ coarse_pg_table =
+ shmem_desc->coarse_pg_table[coarse_page_index];
+ }
+
+ /*
+ * The page is not necessarily filled with zeroes.
+		 * Set the fault descriptors (each descriptor is 4 bytes long)
+ */
+ memset(coarse_pg_table->descriptors, 0x00,
+ TF_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32));
+
+ if (in_user_space) {
+ int pages;
+
+ /*
+			 * TRICK: use coarse_pg_table->descriptors to
+ * hold the (struct page*) items before getting their
+ * physical address
+ */
+ down_read(&(current->mm->mmap_sem));
+ pages = internal_get_user_pages(
+ current,
+ current->mm,
+ buffer_offset_vaddr,
+ /*
+ * page_shift is cleared after retrieving first
+ * coarse page
+ */
+ (pages_to_get - page_shift),
+ (flags & TF_SHMEM_TYPE_WRITE) ? 1 : 0,
+ 0,
+ (struct page **) (coarse_pg_table->descriptors
+ + page_shift),
+ vmas);
+ up_read(&(current->mm->mmap_sem));
+
+ if ((pages <= 0) ||
+ (pages != (pages_to_get - page_shift))) {
+				dprintk(KERN_ERR "tf_fill_descriptor_table:"
+ " get_user_pages got %d pages while "
+ "trying to get %d pages!\n",
+ pages, pages_to_get - page_shift);
+ error = -EFAULT;
+ goto error;
+ }
+
+ for (j = page_shift;
+ j < page_shift + pages;
+ j++) {
+ /* Get the actual L2 descriptors */
+ tf_get_l2_page_descriptor(
+ &coarse_pg_table->descriptors[j],
+ flags,
+ current->mm);
+ /*
+ * Reject Strongly-Ordered or Device Memory
+ */
+#define IS_STRONGLY_ORDERED_OR_DEVICE_MEM(x) \
+ ((((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_STRONGLY_ORDERED) || \
+ (((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_SHARED_DEVICE) || \
+ (((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_NON_SHARED_DEVICE))
+
+ if (IS_STRONGLY_ORDERED_OR_DEVICE_MEM(
+ coarse_pg_table->
+ descriptors[j])) {
+ dprintk(KERN_ERR
+ "tf_fill_descriptor_table:"
+ " descriptor 0x%08X use "
+ "strongly-ordered or device "
+ "memory. Rejecting!\n",
+ coarse_pg_table->
+ descriptors[j]);
+ error = -EFAULT;
+ goto error;
+ }
+ }
+ } else {
+ /* Kernel-space memory */
+ for (j = page_shift;
+ j < pages_to_get;
+ j++) {
+ unsigned long addr =
+ (unsigned long) (buffer_offset_vaddr +
+ ((j - page_shift) *
+ PAGE_SIZE));
+ coarse_pg_table->descriptors[j] =
+ (u32) vmalloc_to_page((void *)addr);
+ get_page((struct page *) coarse_pg_table->
+ descriptors[j]);
+
+				/* convert the page address into an L2 descriptor */
+ tf_get_l2_page_descriptor(
+ &coarse_pg_table->descriptors[j],
+ flags,
+ &init_mm);
+ }
+ }
+
+ dmac_flush_range((void *)coarse_pg_table->descriptors,
+ (void *)(((u32)(coarse_pg_table->descriptors)) +
+ TF_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32)));
+
+ outer_clean_range(
+ __pa(coarse_pg_table->descriptors),
+ __pa(coarse_pg_table->descriptors) +
+ TF_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32));
+ wmb();
+
+ /* Update the coarse page table address */
+ descriptors[coarse_page_index] =
+ tf_get_l1_coarse_descriptor(
+ coarse_pg_table->descriptors);
+
+ /*
+ * The next coarse page has no page shift, reset the
+ * page_shift
+ */
+ page_shift = 0;
+ }
+
+ *descriptor_count = coarse_page_count;
+ shmem_desc->coarse_pg_table_count = coarse_page_count;
+
+#ifdef DEBUG_COARSE_TABLES
+	printk(KERN_DEBUG "\ntf_fill_descriptor_table - size=0x%08X "
+ "numberOfCoarsePages=%d\n", buffer_size,
+ shmem_desc->coarse_pg_table_count);
+ for (coarse_page_index = 0;
+ coarse_page_index < shmem_desc->coarse_pg_table_count;
+ coarse_page_index++) {
+ u32 j;
+ struct tf_coarse_page_table *coarse_page_table =
+ shmem_desc->coarse_pg_table[coarse_page_index];
+
+ printk(KERN_DEBUG " Descriptor=%p address=%p index=%d\n",
+ coarse_page_table,
+ coarse_page_table->descriptors,
+ coarse_page_index);
+ for (j = 0;
+ j < TF_DESCRIPTOR_TABLE_CAPACITY;
+ j += 8) {
+ int k;
+ printk(KERN_DEBUG " ");
+ for (k = j; k < j + 8; k++)
+ printk(KERN_DEBUG "0x%08X ",
+ coarse_page_table->descriptors[k]);
+ printk(KERN_DEBUG "\n");
+ }
+ }
+	printk(KERN_DEBUG "\ntf_fill_descriptor_table() - done\n\n");
+#endif
+
+ return 0;
+
+error:
+ tf_cleanup_shared_memory(
+ alloc_context,
+ shmem_desc,
+ 0);
+
+ return error;
+}
+
+
+/*----------------------------------------------------------------------------
+ * Standard communication operations
+ *----------------------------------------------------------------------------*/
+
+u8 *tf_get_description(struct tf_comm *comm)
+{
+ if (test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags)))
+ return comm->pBuffer->version_description;
+
+ return NULL;
+}
+
+/*
+ * Returns a non-zero value if the specified S-timeout has expired, zero
+ * otherwise.
+ *
+ * The placeholder referenced by relative_timeout_jiffies gives the relative
+ * timeout from now in jiffies. It is set to zero if the S-timeout has expired,
+ * or to MAX_SCHEDULE_TIMEOUT if the S-timeout is infinite.
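+ *
+ * Worked example: S-timeouts are absolute wall-clock times expressed in
+ * milliseconds since the epoch (matching tf_set_current_time above), so
+ * an S-timeout 2500 ms in the future gives timeout - time64 = 2500 and
+ * *relative_timeout_jiffies = msecs_to_jiffies(2500).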
+ */
+static int tf_test_s_timeout(
+ u64 timeout,
+ signed long *relative_timeout_jiffies)
+{
+ struct timeval now;
+ u64 time64;
+
+ *relative_timeout_jiffies = 0;
+
+ /* immediate timeout */
+ if (timeout == TIME_IMMEDIATE)
+ return 1;
+
+ /* infinite timeout */
+ if (timeout == TIME_INFINITE) {
+ dprintk(KERN_DEBUG "tf_test_s_timeout: "
+ "timeout is infinite\n");
+ *relative_timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
+ return 0;
+ }
+
+ do_gettimeofday(&now);
+ time64 = now.tv_sec;
+ /* will not overflow as operations are done on 64bit values */
+ time64 = (time64 * 1000) + (now.tv_usec / 1000);
+
+ /* timeout expired */
+ if (time64 >= timeout) {
+ dprintk(KERN_DEBUG "tf_test_s_timeout: timeout expired\n");
+ return 1;
+ }
+
+ /*
+ * finite timeout, compute relative_timeout_jiffies
+ */
+ /* will not overflow as time64 < timeout */
+ timeout -= time64;
+
+ /* guarantee *relative_timeout_jiffies is a valid timeout */
+ if ((timeout >> 32) != 0)
+ *relative_timeout_jiffies = MAX_JIFFY_OFFSET;
+ else
+ *relative_timeout_jiffies =
+ msecs_to_jiffies((unsigned int) timeout);
+
+ dprintk(KERN_DEBUG "tf_test_s_timeout: timeout is 0x%lx\n",
+ *relative_timeout_jiffies);
+ return 0;
+}
+
+static void tf_copy_answers(struct tf_comm *comm)
+{
+ u32 first_answer;
+ u32 first_free_answer;
+ struct tf_answer_struct *answerStructureTemp;
+
+ if (test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags))) {
+ spin_lock(&comm->lock);
+ first_free_answer = tf_read_reg32(
+ &comm->pBuffer->first_free_answer);
+ first_answer = tf_read_reg32(
+ &comm->pBuffer->first_answer);
+
+ while (first_answer != first_free_answer) {
+ /* answer queue not empty */
+ union tf_answer sComAnswer;
+ struct tf_answer_header header;
+
+ /*
+			 * the size of the command in 32-bit words, not
+			 * in bytes
+ */
+ u32 command_size;
+ u32 i;
+ u32 *temp = (uint32_t *) &header;
+
+ dprintk(KERN_INFO
+ "[pid=%d] tf_copy_answers(%p): "
+ "Read answers from L1\n",
+ current->pid, comm);
+
+ /* Read the answer header */
+ for (i = 0;
+ i < sizeof(struct tf_answer_header)/sizeof(u32);
+ i++)
+ temp[i] = comm->pBuffer->answer_queue[
+ (first_answer + i) %
+ TF_S_ANSWER_QUEUE_CAPACITY];
+
+			/* Read the answer from the L1_Buffer */
+ command_size = header.message_size +
+ sizeof(struct tf_answer_header)/sizeof(u32);
+ temp = (uint32_t *) &sComAnswer;
+ for (i = 0; i < command_size; i++)
+ temp[i] = comm->pBuffer->answer_queue[
+ (first_answer + i) %
+ TF_S_ANSWER_QUEUE_CAPACITY];
+
+ answerStructureTemp = (struct tf_answer_struct *)
+ sComAnswer.header.operation_id;
+
+ tf_dump_answer(&sComAnswer);
+
+ memcpy(answerStructureTemp->answer, &sComAnswer,
+ command_size * sizeof(u32));
+ answerStructureTemp->answer_copied = true;
+
+ first_answer += command_size;
+ tf_write_reg32(&comm->pBuffer->first_answer,
+ first_answer);
+ }
+ spin_unlock(&(comm->lock));
+ }
+}
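+
+/*
+ * Note on the operation_id round trip used above: tf_send_receive() stores
+ * the address of a struct tf_answer_struct in command->header.operation_id
+ * before the command is posted; the Secure World echoes that value back in
+ * the answer header, which lets tf_copy_answers() recover the waiting
+ * structure and copy the answer into it.
+ */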
+
+static void tf_copy_command(
+ struct tf_comm *comm,
+ union tf_command *command,
+ struct tf_connection *connection,
+ enum TF_COMMAND_STATE *command_status)
+{
+ if ((test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags)))
+ && (command != NULL)) {
+ /*
+ * Write the message in the message queue.
+ */
+
+ if (*command_status == TF_COMMAND_STATE_PENDING) {
+ u32 command_size;
+ u32 queue_words_count;
+ u32 i;
+ u32 first_free_command;
+ u32 first_command;
+
+ spin_lock(&comm->lock);
+
+ first_command = tf_read_reg32(
+ &comm->pBuffer->first_command);
+ first_free_command = tf_read_reg32(
+ &comm->pBuffer->first_free_command);
+
+ queue_words_count = first_free_command - first_command;
+ command_size = command->header.message_size +
+ sizeof(struct tf_command_header)/sizeof(u32);
+ if ((queue_words_count + command_size) <
+ TF_N_MESSAGE_QUEUE_CAPACITY) {
+ /*
+				 * The command queue is not full. (If it
+				 * were full, the command would be copied
+				 * on a later iteration of this function.)
+ */
+
+ /*
+ * Change the conn state
+ */
+ if (connection == NULL)
+ goto copy;
+
+ spin_lock(&(connection->state_lock));
+
+ if ((connection->state ==
+ TF_CONN_STATE_NO_DEVICE_CONTEXT)
+ &&
+ (command->header.message_type ==
+ TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT)) {
+
+ dprintk(KERN_INFO
+ "tf_copy_command(%p):"
+ "Conn state is DEVICE_CONTEXT_SENT\n",
+ connection);
+ connection->state =
+ TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT;
+ } else if ((connection->state !=
+ TF_CONN_STATE_VALID_DEVICE_CONTEXT)
+ &&
+ (command->header.message_type !=
+ TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT)) {
+ /* The connection
+ * is no longer valid.
+ * We may not send any command on it,
+ * not even another
+ * DESTROY_DEVICE_CONTEXT.
+ */
+ dprintk(KERN_INFO
+ "[pid=%d] tf_copy_command(%p): "
+					"Connection no longer valid. "
+ "ABORT\n",
+ current->pid, connection);
+ *command_status =
+ TF_COMMAND_STATE_ABORTED;
+ spin_unlock(
+ &(connection->state_lock));
+ spin_unlock(
+ &comm->lock);
+ return;
+ } else if (
+ (command->header.message_type ==
+ TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) &&
+ (connection->state ==
+ TF_CONN_STATE_VALID_DEVICE_CONTEXT)
+ ) {
+ dprintk(KERN_INFO
+ "[pid=%d] tf_copy_command(%p): "
+ "Conn state is "
+ "DESTROY_DEVICE_CONTEXT_SENT\n",
+ current->pid, connection);
+ connection->state =
+ TF_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT;
+ }
+ spin_unlock(&(connection->state_lock));
+copy:
+ /*
+ * Copy the command to L1 Buffer
+ */
+ dprintk(KERN_INFO
+ "[pid=%d] tf_copy_command(%p): "
+ "Write Message in the queue\n",
+ current->pid, command);
+ tf_dump_command(command);
+
+ for (i = 0; i < command_size; i++)
+ comm->pBuffer->command_queue[
+ (first_free_command + i) %
+ TF_N_MESSAGE_QUEUE_CAPACITY] =
+ ((uint32_t *) command)[i];
+
+ *command_status =
+ TF_COMMAND_STATE_SENT;
+ first_free_command += command_size;
+
+ tf_write_reg32(
+ &comm->
+ pBuffer->first_free_command,
+ first_free_command);
+ }
+ spin_unlock(&comm->lock);
+ }
+ }
+}
+
+/*
+ * Sends the specified message through the specified communication channel.
+ *
+ * This function sends the command and waits for the answer
+ *
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+static int tf_send_recv(struct tf_comm *comm,
+ union tf_command *command,
+ struct tf_answer_struct *answerStruct,
+ struct tf_connection *connection,
+ int bKillable
+ #ifdef CONFIG_TF_ZEBRA
+ , bool *secure_is_idle
+ #endif
+ )
+{
+ int result;
+ u64 timeout;
+ signed long nRelativeTimeoutJiffies;
+ bool wait_prepared = false;
+ enum TF_COMMAND_STATE command_status = TF_COMMAND_STATE_PENDING;
+ DEFINE_WAIT(wait);
+#ifdef CONFIG_FREEZER
+ unsigned long saved_flags;
+#endif
+ dprintk(KERN_INFO "[pid=%d] tf_send_recv(%p)\n",
+ current->pid, command);
+
+#ifdef CONFIG_FREEZER
+ saved_flags = current->flags;
+ current->flags |= PF_FREEZER_NOSIG;
+#endif
+
+ /*
+ * Read all answers from the answer queue
+ */
+copy_answers:
+ tf_copy_answers(comm);
+
+ tf_copy_command(comm, command, connection, &command_status);
+
+ /*
+ * Notify all waiting threads
+ */
+ wake_up(&(comm->wait_queue));
+
+#ifdef CONFIG_FREEZER
+ if (unlikely(freezing(current))) {
+
+#ifdef CONFIG_TF_ZEBRA
+ if (!(*secure_is_idle)) {
+ if (tf_schedule_secure_world(comm, true) ==
+ STATUS_PENDING)
+ goto copy_answers;
+
+ tf_l4sec_clkdm_allow_idle(true);
+ *secure_is_idle = true;
+ }
+#endif
+
+ dprintk(KERN_INFO
+ "Entering refrigerator.\n");
+ refrigerator();
+ dprintk(KERN_INFO
+ "Left refrigerator.\n");
+ goto copy_answers;
+ }
+#endif
+
+#ifndef CONFIG_PREEMPT
+ if (need_resched())
+ schedule();
+#endif
+
+#ifdef CONFIG_TF_ZEBRA
+ /*
+ * Handle RPC (if any)
+ */
+ if (tf_rpc_execute(comm) == RPC_NON_YIELD)
+ goto schedule_secure_world;
+#endif
+
+ /*
+ * Join wait queue
+ */
+ /*dprintk(KERN_INFO "[pid=%d] tf_send_recv(%p): Prepare to wait\n",
+ current->pid, command);*/
+ prepare_to_wait(&comm->wait_queue, &wait,
+ bKillable ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+ wait_prepared = true;
+
+ /*
+ * Check if our answer is available
+ */
+ if (command_status == TF_COMMAND_STATE_ABORTED) {
+ /* Not waiting for an answer, return error code */
+ result = -EINTR;
+ dprintk(KERN_ERR "[pid=%d] tf_send_recv: "
+			"Command status is ABORTED. "
+ "Exit with 0x%x\n",
+ current->pid, result);
+ goto exit;
+ }
+ if (answerStruct->answer_copied) {
+ dprintk(KERN_INFO "[pid=%d] tf_send_recv: "
+ "Received answer (type 0x%02X)\n",
+ current->pid,
+ answerStruct->answer->header.message_type);
+ result = 0;
+ goto exit;
+ }
+
+ /*
+ * Check if a signal is pending
+ */
+ if (bKillable && (sigkill_pending())) {
+ if (command_status == TF_COMMAND_STATE_PENDING)
+			/* Command was not sent. */
+ result = -EINTR;
+ else
+ /* Command was sent but no answer was received yet. */
+ result = -EIO;
+
+ dprintk(KERN_ERR "[pid=%d] tf_send_recv: "
+ "Signal Pending. Return error %d\n",
+ current->pid, result);
+ goto exit;
+ }
+
+ /*
+ * Check if secure world is schedulable. It is schedulable if at
+ * least one of the following conditions holds:
+ * + it is still initializing (TF_COMM_FLAG_L1_SHARED_ALLOCATED
+ * is not set);
+ * + there is a command in the queue;
+ * + the secure world timeout is zero.
+ */
+ if (test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags))) {
+ u32 first_free_command;
+ u32 first_command;
+ spin_lock(&comm->lock);
+ first_command = tf_read_reg32(
+ &comm->pBuffer->first_command);
+ first_free_command = tf_read_reg32(
+ &comm->pBuffer->first_free_command);
+ spin_unlock(&comm->lock);
+ tf_read_timeout(comm, &timeout);
+ if ((first_free_command == first_command) &&
+ (tf_test_s_timeout(timeout,
+ &nRelativeTimeoutJiffies) == 0))
+ /*
+			 * If the command queue is empty and the timeout has
+			 * not expired, the secure world is not schedulable
+ */
+ goto wait;
+ }
+
+ finish_wait(&comm->wait_queue, &wait);
+ wait_prepared = false;
+
+ /*
+ * Yield to the Secure World
+ */
+#ifdef CONFIG_TF_ZEBRA
+schedule_secure_world:
+ if (*secure_is_idle) {
+ tf_l4sec_clkdm_wakeup(true);
+ *secure_is_idle = false;
+ }
+#endif
+
+ result = tf_schedule_secure_world(comm, false);
+ if (result < 0)
+ goto exit;
+ goto copy_answers;
+
+wait:
+ if (bKillable && (sigkill_pending())) {
+ if (command_status == TF_COMMAND_STATE_PENDING)
+ result = -EINTR; /* Command was not sent. */
+ else
+ /* Command was sent but no answer was received yet. */
+ result = -EIO;
+
+ dprintk(KERN_ERR "[pid=%d] tf_send_recv: "
+ "Signal Pending while waiting. Return error %d\n",
+ current->pid, result);
+ goto exit;
+ }
+
+ if (nRelativeTimeoutJiffies == MAX_SCHEDULE_TIMEOUT)
+ dprintk(KERN_INFO "[pid=%d] tf_send_recv: "
+ "prepare to sleep infinitely\n", current->pid);
+ else
+ dprintk(KERN_INFO "tf_send_recv: "
+ "prepare to sleep 0x%lx jiffies\n",
+ nRelativeTimeoutJiffies);
+
+#ifdef CONFIG_TF_ZEBRA
+ if (!(*secure_is_idle)) {
+ if (tf_schedule_secure_world(comm, true) == STATUS_PENDING) {
+ finish_wait(&comm->wait_queue, &wait);
+ wait_prepared = false;
+ goto copy_answers;
+ }
+ tf_l4sec_clkdm_allow_idle(true);
+ *secure_is_idle = true;
+ }
+#endif
+
+ /* go to sleep */
+ if (schedule_timeout(nRelativeTimeoutJiffies) == 0)
+ dprintk(KERN_INFO
+ "tf_send_recv: timeout expired\n");
+ else
+ dprintk(KERN_INFO
+ "tf_send_recv: signal delivered\n");
+
+ finish_wait(&comm->wait_queue, &wait);
+ wait_prepared = false;
+ goto copy_answers;
+
+exit:
+ if (wait_prepared) {
+ finish_wait(&comm->wait_queue, &wait);
+ wait_prepared = false;
+ }
+
+#ifdef CONFIG_TF_ZEBRA
+ if ((!(*secure_is_idle)) && (result != -EIO)) {
+ if (tf_schedule_secure_world(comm, true) == STATUS_PENDING)
+ goto copy_answers;
+
+ tf_l4sec_clkdm_allow_idle(true);
+ *secure_is_idle = true;
+ }
+#endif
+
+#ifdef CONFIG_FREEZER
+ current->flags &= ~(PF_FREEZER_NOSIG);
+ current->flags |= (saved_flags & PF_FREEZER_NOSIG);
+#endif
+
+ return result;
+}
+
+/*
+ * Sends the specified message through the specified communication channel.
+ *
+ * This function sends the message and waits for the corresponding answer.
+ * It may return early if a signal needs to be delivered.
+ *
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+int tf_send_receive(struct tf_comm *comm,
+ union tf_command *command,
+ union tf_answer *answer,
+ struct tf_connection *connection,
+ bool bKillable)
+{
+ int error;
+ struct tf_answer_struct answerStructure;
+#ifdef CONFIG_SMP
+ long ret_affinity;
+ cpumask_t saved_cpu_mask;
+ cpumask_t local_cpu_mask = CPU_MASK_NONE;
+#endif
+#ifdef CONFIG_TF_ZEBRA
+ bool secure_is_idle = true;
+#endif
+
+ answerStructure.answer = answer;
+ answerStructure.answer_copied = false;
+
+ if (command != NULL)
+ command->header.operation_id = (u32) &answerStructure;
+
+ dprintk(KERN_INFO "tf_send_receive\n");
+
+#ifdef CONFIG_TF_ZEBRA
+ if (!test_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags)) {
+ dprintk(KERN_ERR "tf_send_receive(%p): "
+ "Secure world not started\n", comm);
+
+ return -EFAULT;
+ }
+#endif
+
+ if (test_bit(TF_COMM_FLAG_TERMINATING, &(comm->flags)) != 0) {
+ dprintk(KERN_DEBUG
+ "tf_send_receive: Flag Terminating is set\n");
+ return 0;
+ }
+
+#ifdef CONFIG_SMP
+ cpu_set(0, local_cpu_mask);
+ sched_getaffinity(0, &saved_cpu_mask);
+ ret_affinity = sched_setaffinity(0, &local_cpu_mask);
+ if (ret_affinity != 0)
+ dprintk(KERN_ERR "sched_setaffinity #1 -> 0x%lX", ret_affinity);
+#endif
+
+
+ /*
+ * Send the command
+ */
+ error = tf_send_recv(comm,
+ command, &answerStructure, connection, bKillable
+ #ifdef CONFIG_TF_ZEBRA
+ , &secure_is_idle
+ #endif
+ );
+
+ if (!bKillable && sigkill_pending()) {
+ if ((command->header.message_type ==
+ TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT) &&
+ (answer->create_device_context.error_code ==
+ S_SUCCESS)) {
+
+ /*
+ * CREATE_DEVICE_CONTEXT was interrupted.
+ */
+ dprintk(KERN_INFO "tf_send_receive: "
+ "sending DESTROY_DEVICE_CONTEXT\n");
+ answerStructure.answer = answer;
+ answerStructure.answer_copied = false;
+
+ command->header.message_type =
+ TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT;
+ command->header.message_size =
+ (sizeof(struct
+ tf_command_destroy_device_context) -
+ sizeof(struct tf_command_header))/sizeof(u32);
+ command->header.operation_id =
+ (u32) &answerStructure;
+ command->destroy_device_context.device_context =
+ answer->create_device_context.
+ device_context;
+
+ goto destroy_context;
+ }
+ }
+
+ if (error == 0) {
+ /*
+ * tf_send_recv returned Success.
+ */
+ if (command->header.message_type ==
+ TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT) {
+ spin_lock(&(connection->state_lock));
+ connection->state = TF_CONN_STATE_VALID_DEVICE_CONTEXT;
+ spin_unlock(&(connection->state_lock));
+ } else if (command->header.message_type ==
+ TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) {
+ spin_lock(&(connection->state_lock));
+ connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
+ spin_unlock(&(connection->state_lock));
+ }
+ } else if (error == -EINTR) {
+ /*
+ * No command was sent, return failure.
+ */
+ dprintk(KERN_ERR
+ "tf_send_receive: "
+ "tf_send_recv failed (error %d) !\n",
+ error);
+ } else if (error == -EIO) {
+ /*
+ * A command was sent but its answer is still pending.
+ */
+
+ /* means bKillable is true */
+ dprintk(KERN_ERR
+ "tf_send_receive: "
+			"tf_send_recv interrupted (error %d). "
+ "Send DESTROY_DEVICE_CONTEXT.\n", error);
+
+ /* Send the DESTROY_DEVICE_CONTEXT. */
+ answerStructure.answer = answer;
+ answerStructure.answer_copied = false;
+
+ command->header.message_type =
+ TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT;
+ command->header.message_size =
+ (sizeof(struct tf_command_destroy_device_context) -
+ sizeof(struct tf_command_header))/sizeof(u32);
+ command->header.operation_id =
+ (u32) &answerStructure;
+ command->destroy_device_context.device_context =
+ connection->device_context;
+
+ error = tf_send_recv(comm,
+ command, &answerStructure, connection, false
+ #ifdef CONFIG_TF_ZEBRA
+ , &secure_is_idle
+ #endif
+ );
+ if (error == -EINTR) {
+ /*
+ * Another thread already sent
+ * DESTROY_DEVICE_CONTEXT.
+ * We must still wait for the answer
+ * to the original command.
+ */
+ command = NULL;
+ goto destroy_context;
+ } else {
+ /* An answer was received.
+ * Check if it is the answer
+ * to the DESTROY_DEVICE_CONTEXT.
+ */
+ spin_lock(&comm->lock);
+ if (answer->header.message_type !=
+ TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) {
+ answerStructure.answer_copied = false;
+ }
+ spin_unlock(&comm->lock);
+ if (!answerStructure.answer_copied) {
+ /* Answer to DESTROY_DEVICE_CONTEXT
+ * was not yet received.
+ * Wait for the answer.
+ */
+ dprintk(KERN_INFO
+				"[pid=%d] tf_send_receive: "
+				"Answer to DESTROY_DEVICE_CONTEXT "
+				"not yet received. Retry\n",
+ current->pid);
+ command = NULL;
+ goto destroy_context;
+ }
+ }
+ }
+
+ dprintk(KERN_INFO "tf_send_receive(): Message answer ready\n");
+ goto exit;
+
+destroy_context:
+ error = tf_send_recv(comm,
+ command, &answerStructure, connection, false
+ #ifdef CONFIG_TF_ZEBRA
+ , &secure_is_idle
+ #endif
+ );
+
+ /*
+ * tf_send_recv cannot return an error because
+ * it's not killable and not within a connection
+ */
+ BUG_ON(error != 0);
+
+ /* Reset the state, so a new CREATE DEVICE CONTEXT can be sent */
+ spin_lock(&(connection->state_lock));
+ connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
+ spin_unlock(&(connection->state_lock));
+
+exit:
+
+#ifdef CONFIG_SMP
+ ret_affinity = sched_setaffinity(0, &saved_cpu_mask);
+ if (ret_affinity != 0)
+ dprintk(KERN_ERR "sched_setaffinity #2 -> 0x%lX", ret_affinity);
+#endif
+ return error;
+}
+
+/*----------------------------------------------------------------------------
+ * Power management
+ *----------------------------------------------------------------------------*/
+
+
+/*
+ * Handles all the power management calls.
+ * The operation is the type of power management
+ * operation to be performed.
+ *
+ * This routine will only return if a failure occurred or if
+ * the required power management is of type "resume".
+ * "Hibernate" and "Shutdown" should lock when doing the
+ * corresponding SMC to the Secure World
+ */
+int tf_power_management(struct tf_comm *comm,
+ enum TF_POWER_OPERATION operation)
+{
+ u32 status;
+ int error = 0;
+
+ dprintk(KERN_INFO "tf_power_management(%d)\n", operation);
+
+#ifdef CONFIG_TF_ZEBRA
+ if (!test_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags)) {
+ dprintk(KERN_INFO "tf_power_management(%p): "
+ "succeeded (not started)\n", comm);
+
+ return 0;
+ }
+#endif
+
+ status = ((tf_read_reg32(&(comm->pBuffer->status_s))
+ & TF_STATUS_POWER_STATE_MASK)
+ >> TF_STATUS_POWER_STATE_SHIFT);
+
+ switch (operation) {
+ case TF_POWER_OPERATION_SHUTDOWN:
+ switch (status) {
+ case TF_POWER_MODE_ACTIVE:
+ error = tf_pm_shutdown(comm);
+
+ if (error) {
+ dprintk(KERN_ERR "tf_power_management(): "
+ "Failed with error code 0x%08x\n",
+ error);
+ goto error;
+ }
+ break;
+
+ default:
+ goto not_allowed;
+ }
+ break;
+
+ case TF_POWER_OPERATION_HIBERNATE:
+ switch (status) {
+ case TF_POWER_MODE_ACTIVE:
+ error = tf_pm_hibernate(comm);
+
+ if (error) {
+ dprintk(KERN_ERR "tf_power_management(): "
+ "Failed with error code 0x%08x\n",
+ error);
+ goto error;
+ }
+ break;
+
+ default:
+ goto not_allowed;
+ }
+ break;
+
+ case TF_POWER_OPERATION_RESUME:
+ error = tf_pm_resume(comm);
+
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_power_management(): "
+ "Failed with error code 0x%08x\n",
+ error);
+ goto error;
+ }
+ break;
+ }
+
+ dprintk(KERN_INFO "tf_power_management(): succeeded\n");
+ return 0;
+
+not_allowed:
+ dprintk(KERN_ERR "tf_power_management(): "
+ "Power command not allowed in current "
+ "Secure World state %d\n", status);
+ error = -ENOTTY;
+error:
+ return error;
+}
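+
+/*
+ * Usage sketch (illustrative only; tf_driver_suspend is a
+ * hypothetical caller, not part of this driver):
+ *
+ * static int tf_driver_suspend(void)
+ * {
+ *	struct tf_comm *comm = &tf_get_device()->sm;
+ *
+ *	return tf_power_management(comm, TF_POWER_OPERATION_HIBERNATE);
+ * }
+ */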
diff --git a/security/smc/tf_comm.h b/security/smc/tf_comm.h
new file mode 100644
index 0000000..48bd934
--- /dev/null
+++ b/security/smc/tf_comm.h
@@ -0,0 +1,204 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TF_COMM_H__
+#define __TF_COMM_H__
+
+#include "tf_defs.h"
+#include "tf_protocol.h"
+
+/*----------------------------------------------------------------------------
+ * Misc
+ *----------------------------------------------------------------------------*/
+
+void tf_set_current_time(struct tf_comm *comm);
+
+/*
+ * Atomic accesses to 32-bit variables in the L1 Shared buffer
+ */
+static inline u32 tf_read_reg32(const u32 *comm_buffer)
+{
+ u32 result;
+
+ __asm__ __volatile__("@ tf_read_reg32\n"
+ "ldrex %0, [%1]\n"
+ : "=&r" (result)
+ : "r" (comm_buffer)
+ );
+
+ return result;
+}
+
+static inline void tf_write_reg32(void *comm_buffer, u32 value)
+{
+ u32 tmp;
+
+ __asm__ __volatile__("@ tf_write_reg32\n"
+ "1: ldrex %0, [%2]\n"
+ " strex %0, %1, [%2]\n"
+ " teq %0, #0\n"
+ " bne 1b"
+ : "=&r" (tmp)
+ : "r" (value), "r" (comm_buffer)
+ : "cc"
+ );
+}
+
+/*
+ * Atomic accesses to 64-bit variables in the L1 Shared buffer
+ */
+static inline u64 tf_read_reg64(void *comm_buffer)
+{
+ u64 result;
+
+ __asm__ __volatile__("@ tf_read_reg64\n"
+ "ldrexd %0, [%1]\n"
+ : "=&r" (result)
+ : "r" (comm_buffer)
+ );
+
+ return result;
+}
+
+static inline void tf_write_reg64(void *comm_buffer, u64 value)
+{
+ u64 tmp;
+
+ __asm__ __volatile__("@ tf_write_reg64\n"
+ "1: ldrexd %0, [%2]\n"
+ " strexd %0, %1, [%2]\n"
+ " teq %0, #0\n"
+ " bne 1b"
+ : "=&r" (tmp)
+ : "r" (value), "r" (comm_buffer)
+ : "cc"
+ );
+}
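+
+/*
+ * Example (illustrative): reading the Secure World power state from
+ * the L1 shared buffer with the accessors above, as done in
+ * tf_power_management():
+ *
+ * u32 state = (tf_read_reg32(&comm->pBuffer->status_s)
+ *		& TF_STATUS_POWER_STATE_MASK)
+ *		>> TF_STATUS_POWER_STATE_SHIFT;
+ */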
+
+/*----------------------------------------------------------------------------
+ * SMC operations
+ *----------------------------------------------------------------------------*/
+
+/* RPC return values */
+#define RPC_NO 0x00 /* No RPC to execute */
+#define RPC_YIELD 0x01 /* Yield RPC */
+#define RPC_NON_YIELD 0x02 /* non-Yield RPC */
+
+int tf_rpc_execute(struct tf_comm *comm);
+
+/*----------------------------------------------------------------------------
+ * Shared memory related operations
+ *----------------------------------------------------------------------------*/
+
+#define L1_DESCRIPTOR_FAULT (0x00000000)
+#define L2_DESCRIPTOR_FAULT (0x00000000)
+
+#define L2_DESCRIPTOR_ADDR_MASK (0xFFFFF000)
+
+#define DESCRIPTOR_V13_12_MASK (0x3 << PAGE_SHIFT)
+#define DESCRIPTOR_V13_12_GET(a) ((a & DESCRIPTOR_V13_12_MASK) >> PAGE_SHIFT)
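+
+/*
+ * Worked example: with PAGE_SHIFT == 12 (4 KB pages),
+ * DESCRIPTOR_V13_12_MASK is 0x3000, so for a descriptor value of
+ * 0x8012A000, DESCRIPTOR_V13_12_GET() yields
+ * (0x8012A000 & 0x3000) >> 12 == 0x2, i.e. bits [13:12].
+ */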
+
+struct tf_coarse_page_table *tf_alloc_coarse_page_table(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ u32 type);
+
+void tf_free_coarse_page_table(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ struct tf_coarse_page_table *coarse_pg_table,
+ int force);
+
+void tf_init_coarse_page_table_allocator(
+ struct tf_coarse_page_table_allocation_context *alloc_context);
+
+void tf_release_coarse_page_table_allocator(
+ struct tf_coarse_page_table_allocation_context *alloc_context);
+
+struct page *tf_l2_page_descriptor_to_page(u32 l2_page_descriptor);
+
+u32 tf_get_l2_descriptor_common(u32 vaddr, struct mm_struct *mm);
+
+void tf_cleanup_shared_memory(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ struct tf_shmem_desc *shmem_desc,
+ u32 full_cleanup);
+
+int tf_fill_descriptor_table(
+ struct tf_coarse_page_table_allocation_context *alloc_context,
+ struct tf_shmem_desc *shmem_desc,
+ u32 buffer,
+ struct vm_area_struct **vmas,
+ u32 descriptors[TF_MAX_COARSE_PAGES],
+ u32 buffer_size,
+ u32 *buffer_start_offset,
+ bool in_user_space,
+ u32 flags,
+ u32 *descriptor_count);
+
+/*----------------------------------------------------------------------------
+ * Standard communication operations
+ *----------------------------------------------------------------------------*/
+
+#define STATUS_PENDING 0x00000001
+
+int tf_schedule_secure_world(struct tf_comm *comm, bool prepare_exit);
+
+int tf_send_receive(
+ struct tf_comm *comm,
+ union tf_command *command,
+ union tf_answer *answer,
+ struct tf_connection *connection,
+ bool bKillable);
+
+
+/**
+ * Get a pointer to the secure world description.
+ * This points directly into the L1 shared buffer
+ * and is valid only once the communication has
+ * been initialized.
+ **/
+u8 *tf_get_description(struct tf_comm *comm);
+
+/*----------------------------------------------------------------------------
+ * Power management
+ *----------------------------------------------------------------------------*/
+
+enum TF_POWER_OPERATION {
+ TF_POWER_OPERATION_HIBERNATE = 1,
+ TF_POWER_OPERATION_SHUTDOWN = 2,
+ TF_POWER_OPERATION_RESUME = 3,
+};
+
+int tf_pm_hibernate(struct tf_comm *comm);
+int tf_pm_resume(struct tf_comm *comm);
+int tf_pm_shutdown(struct tf_comm *comm);
+
+int tf_power_management(struct tf_comm *comm,
+ enum TF_POWER_OPERATION operation);
+
+
+/*----------------------------------------------------------------------------
+ * Communication initialization and termination
+ *----------------------------------------------------------------------------*/
+
+int tf_init(struct tf_comm *comm);
+
+void tf_terminate(struct tf_comm *comm);
+
+
+#endif /* __TF_COMM_H__ */
diff --git a/security/smc/tf_comm_mshield.c b/security/smc/tf_comm_mshield.c
new file mode 100644
index 0000000..b148e6c
--- /dev/null
+++ b/security/smc/tf_comm_mshield.c
@@ -0,0 +1,1011 @@
+/**
+ * Copyright (c) 2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <asm/div64.h>
+#include <asm/system.h>
+#include <asm/cputype.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/page-flags.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+#include <linux/version.h>
+#include <linux/jiffies.h>
+#include <linux/dma-mapping.h>
+#include <linux/cpu.h>
+
+#include <asm/cacheflush.h>
+
+#include "tf_defs.h"
+#include "tf_comm.h"
+#include "tf_util.h"
+#include "tf_conn.h"
+#include "tf_zebra.h"
+#include "tf_crypto.h"
+
+/*--------------------------------------------------------------------------
+ * Internal constants
+ *-------------------------------------------------------------------------- */
+
+/* RPC commands */
+#define RPC_CMD_YIELD 0x00
+#define RPC_CMD_INIT 0x01
+#define RPC_CMD_TRACE 0x02
+
+/* RPC return values to secure world */
+#define RPC_SUCCESS 0x00000000
+#define RPC_ERROR_BAD_PARAMETERS 0xFFFF0006
+#define RPC_ERROR_CONNECTION_PROTOCOL 0xFFFF3020
+
+/*
+ * RPC call status
+ *
+ * 0: the secure world yielded due to an interrupt
+ * 1: the secure world yielded on an RPC (no public world thread is handling it)
+ * 2: the secure world yielded on an RPC and the response to that RPC is now in
+ * place
+ */
+#define RPC_ADVANCEMENT_NONE 0
+#define RPC_ADVANCEMENT_PENDING 1
+#define RPC_ADVANCEMENT_FINISHED 2
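+
+/*
+ * Typical sequence (illustrative): an RPC raised by the secure world
+ * moves g_RPC_advancement from RPC_ADVANCEMENT_NONE to
+ * RPC_ADVANCEMENT_PENDING; tf_rpc_execute() services the command and
+ * sets RPC_ADVANCEMENT_FINISHED; tf_schedule_secure_world() then
+ * re-enters the secure world with SMICODEPUB_RPC_END and resets the
+ * state to RPC_ADVANCEMENT_NONE.
+ */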
+
+u32 g_RPC_advancement;
+u32 g_RPC_parameters[4] = {0, 0, 0, 0};
+u32 g_secure_task_id;
+u32 g_service_end;
+
+/*
+ * Secure ROMCode HAL API Identifiers
+ */
+#define API_HAL_SDP_RUNTIMEINIT_INDEX 0x04
+#define API_HAL_LM_PALOAD_INDEX 0x05
+#define API_HAL_LM_PAUNLOADALL_INDEX 0x07
+#define API_HAL_TASK_MGR_RPCINIT_INDEX 0x08
+#define API_HAL_KM_GETSECUREROMCODECRC_INDEX 0x0B
+#define API_HAL_SEC_L3_RAM_RESIZE_INDEX 0x17
+
+#define API_HAL_RET_VALUE_OK 0x0
+
+/* SE entry flags */
+#define FLAG_START_HAL_CRITICAL 0x4
+#define FLAG_IRQFIQ_MASK 0x3
+#define FLAG_IRQ_ENABLE 0x2
+#define FLAG_FIQ_ENABLE 0x1
+
+#define SMICODEPUB_IRQ_END 0xFE
+#define SMICODEPUB_FIQ_END 0xFD
+#define SMICODEPUB_RPC_END 0xFC
+
+#define SEC_RAM_SIZE_40KB 0x0000A000
+#define SEC_RAM_SIZE_48KB 0x0000C000
+#define SEC_RAM_SIZE_52KB 0x0000D000
+#define SEC_RAM_SIZE_60KB 0x0000F000
+#define SEC_RAM_SIZE_64KB 0x00010000
+
+struct tf_ns_pa_info {
+ void *certificate;
+ void *parameters;
+ void *results;
+};
+
+/*
+ * AFY: I would like to remove the L0 buffer altogether:
+ * - you can use the L1 shared buffer to pass the RPC parameters and results:
+ * I think these easily fit in 256 bytes and you can use the area at
+ * offset 0x2C0-0x3BF in the L1 shared buffer
+ */
+struct tf_init_buffer {
+ u32 init_status;
+ u32 protocol_version;
+ u32 l1_shared_buffer_descr;
+ u32 backing_store_addr;
+ u32 backext_storage_addr;
+ u32 workspace_addr;
+ u32 workspace_size;
+ u32 properties_length;
+ u8 properties_buffer[1];
+};
+
+#ifdef CONFIG_HAS_WAKELOCK
+static struct wake_lock g_tf_wake_lock;
+static u32 tf_wake_lock_count;
+#endif
+
+static struct clockdomain *smc_l4_sec_clkdm;
+static u32 smc_l4_sec_clkdm_use_count;
+
+static int __init tf_early_init(void)
+{
+ g_secure_task_id = 0;
+
+ dprintk(KERN_INFO "SMC early init\n");
+
+ smc_l4_sec_clkdm = clkdm_lookup("l4_secure_clkdm");
+ if (smc_l4_sec_clkdm == NULL)
+ return -EFAULT;
+
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock_init(&g_tf_wake_lock, WAKE_LOCK_SUSPEND,
+ TF_DEVICE_BASE_NAME);
+#endif
+
+ return 0;
+}
+early_initcall(tf_early_init);
+
+/*
+ * Function responsible for formatting parameters to pass from NS world to
+ * S world
+ */
+u32 omap4_secure_dispatcher(u32 app_id, u32 flags, u32 nargs,
+ u32 arg1, u32 arg2, u32 arg3, u32 arg4)
+{
+ u32 ret;
+ unsigned long iflags;
+ u32 pub2sec_args[5] = {0, 0, 0, 0, 0};
+
+ /*dprintk(KERN_INFO "omap4_secure_dispatcher: "
+ "app_id=0x%08x, flags=0x%08x, nargs=%u\n",
+ app_id, flags, nargs);*/
+
+ /*if (nargs != 0)
+ dprintk(KERN_INFO
+ "omap4_secure_dispatcher: args=%08x, %08x, %08x, %08x\n",
+ arg1, arg2, arg3, arg4);*/
+
+ pub2sec_args[0] = nargs;
+ pub2sec_args[1] = arg1;
+ pub2sec_args[2] = arg2;
+ pub2sec_args[3] = arg3;
+ pub2sec_args[4] = arg4;
+
+ /* Make sure parameters are visible to the secure world */
+ dmac_flush_range((void *)pub2sec_args,
+ (void *)(((u32)(pub2sec_args)) + 5*sizeof(u32)));
+ outer_clean_range(__pa(pub2sec_args),
+ __pa(pub2sec_args) + 5*sizeof(u32));
+ wmb();
+
+ /*
+ * Put L4 Secure clock domain to SW_WKUP so that modules are accessible
+ */
+ tf_l4sec_clkdm_wakeup(false);
+
+ local_irq_save(iflags);
+#ifdef DEBUG
+ BUG_ON((read_mpidr() & 0x00000003) != 0);
+#endif
+ /* proc_id is always 0 */
+ ret = schedule_secure_world(app_id, 0, flags, __pa(pub2sec_args));
+ local_irq_restore(iflags);
+
+ /* Restore the HW_SUP on L4 Sec clock domain so hardware can idle */
+ tf_l4sec_clkdm_allow_idle(false);
+
+ /*dprintk(KERN_INFO "omap4_secure_dispatcher()\n");*/
+
+ return ret;
+}
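+
+/*
+ * Example call (illustrative), mirroring the HAL invocations made in
+ * tf_se_init() below, with flags = 0 and nargs = 0:
+ *
+ * u32 crc = omap4_secure_dispatcher(
+ *	API_HAL_KM_GETSECUREROMCODECRC_INDEX, 0, 0, 0, 0, 0, 0);
+ */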
+
+/* Yields the Secure World */
+int tf_schedule_secure_world(struct tf_comm *comm, bool prepare_exit)
+{
+ int status = 0;
+ int ret;
+ unsigned long iflags;
+ u32 appli_id;
+
+ tf_set_current_time(comm);
+
+ local_irq_save(iflags);
+
+ switch (g_RPC_advancement) {
+ case RPC_ADVANCEMENT_NONE:
+ /* Return from IRQ */
+ appli_id = SMICODEPUB_IRQ_END;
+ if (prepare_exit)
+ status = STATUS_PENDING;
+ break;
+ case RPC_ADVANCEMENT_PENDING:
+ /* nothing to do in this case */
+ goto exit;
+ default:
+ case RPC_ADVANCEMENT_FINISHED:
+ if (prepare_exit)
+ goto exit;
+ appli_id = SMICODEPUB_RPC_END;
+ g_RPC_advancement = RPC_ADVANCEMENT_NONE;
+ break;
+ }
+
+ g_service_end = 1;
+ /* yield to the Secure World */
+ ret = omap4_secure_dispatcher(appli_id, /* app_id */
+ 0, 0, /* flags, nargs */
+ 0, 0, 0, 0); /* arg1, arg2, arg3, arg4 */
+ if (g_service_end != 0) {
+ dprintk(KERN_ERR "Service End ret=%X\n", ret);
+
+ if (ret == 0) {
+ dmac_flush_range((void *)comm->init_shared_buffer,
+ (void *)(((u32)(comm->init_shared_buffer)) +
+ PAGE_SIZE));
+ outer_inv_range(__pa(comm->init_shared_buffer),
+ __pa(comm->init_shared_buffer) +
+ PAGE_SIZE);
+
+ ret = ((struct tf_init_buffer *)
+ (comm->init_shared_buffer))->init_status;
+
+ dprintk(KERN_ERR "SMC PA failure ret=%X\n", ret);
+ if (ret == 0)
+ ret = -EFAULT;
+ }
+ clear_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags);
+ omap4_secure_dispatcher(API_HAL_LM_PAUNLOADALL_INDEX,
+ FLAG_START_HAL_CRITICAL, 0, 0, 0, 0, 0);
+ status = ret;
+ }
+
+exit:
+ local_irq_restore(iflags);
+
+ return status;
+}
+
+/* Initializes the SE (SDP, SRAM resize, RPC handler) */
+static int tf_se_init(struct tf_comm *comm,
+ u32 sdp_backing_store_addr, u32 sdp_bkext_store_addr)
+{
+ int error;
+ unsigned int crc;
+
+ if (comm->se_initialized) {
+ dprintk(KERN_INFO "tf_se_init: SE already initialized... "
+ "nothing to do\n");
+ return 0;
+ }
+
+ /* Secure CRC read */
+ dprintk(KERN_INFO "tf_se_init: Secure CRC Read...\n");
+
+ crc = omap4_secure_dispatcher(API_HAL_KM_GETSECUREROMCODECRC_INDEX,
+ 0, 0, 0, 0, 0, 0);
+ printk(KERN_INFO "SMC: SecureCRC=0x%08X\n", crc);
+
+ /*
+ * Flush caches before resize, just to be sure there is no
+ * pending public data writes back to SRAM that could trigger a
+ * security violation once their address space is marked as
+ * secure.
+ */
+#define OMAP4_SRAM_PA 0x40300000
+#define OMAP4_SRAM_SIZE 0xe000
+ flush_cache_all();
+ outer_flush_range(OMAP4_SRAM_PA,
+ OMAP4_SRAM_PA + OMAP4_SRAM_SIZE);
+ wmb();
+
+ /* SRAM resize */
+ dprintk(KERN_INFO "tf_se_init: SRAM resize (52KB)...\n");
+ error = omap4_secure_dispatcher(API_HAL_SEC_L3_RAM_RESIZE_INDEX,
+ FLAG_FIQ_ENABLE | FLAG_START_HAL_CRITICAL, 1,
+ SEC_RAM_SIZE_52KB, 0, 0, 0);
+
+ if (error == API_HAL_RET_VALUE_OK) {
+ dprintk(KERN_INFO "tf_se_init: SRAM resize OK\n");
+ } else {
+ dprintk(KERN_ERR "tf_se_init: "
+ "SRAM resize failed [0x%x]\n", error);
+ goto error;
+ }
+
+ /* SDP init */
+ dprintk(KERN_INFO "tf_se_init: SDP runtime init..."
+ "(sdp_backing_store_addr=%x, sdp_bkext_store_addr=%x)\n",
+ sdp_backing_store_addr, sdp_bkext_store_addr);
+ error = omap4_secure_dispatcher(API_HAL_SDP_RUNTIMEINIT_INDEX,
+ FLAG_FIQ_ENABLE | FLAG_START_HAL_CRITICAL, 2,
+ sdp_backing_store_addr, sdp_bkext_store_addr, 0, 0);
+
+ if (error == API_HAL_RET_VALUE_OK) {
+ dprintk(KERN_INFO "tf_se_init: SDP runtime init OK\n");
+ } else {
+ dprintk(KERN_ERR "tf_se_init: "
+ "SDP runtime init failed [0x%x]\n", error);
+ goto error;
+ }
+
+ /* RPC init */
+ dprintk(KERN_INFO "tf_se_init: RPC init...\n");
+ error = omap4_secure_dispatcher(API_HAL_TASK_MGR_RPCINIT_INDEX,
+ FLAG_START_HAL_CRITICAL, 1,
+ (u32) (u32(*const) (u32, u32, u32, u32)) &rpc_handler, 0, 0, 0);
+
+ if (error == API_HAL_RET_VALUE_OK) {
+ dprintk(KERN_INFO "tf_se_init: RPC init OK\n");
+ } else {
+ dprintk(KERN_ERR "tf_se_init: "
+ "RPC init failed [0x%x]\n", error);
+ goto error;
+ }
+
+ comm->se_initialized = true;
+
+ return 0;
+
+error:
+ return -EFAULT;
+}
+
+/* Check protocol version returned by the PA */
+static u32 tf_rpc_init(struct tf_comm *comm)
+{
+ u32 protocol_version;
+ u32 rpc_error = RPC_SUCCESS;
+
+ dprintk(KERN_INFO "tf_rpc_init(%p)\n", comm);
+
+ spin_lock(&(comm->lock));
+
+ dmac_flush_range((void *)comm->init_shared_buffer,
+ (void *)(((u32)(comm->init_shared_buffer)) + PAGE_SIZE));
+ outer_inv_range(__pa(comm->init_shared_buffer),
+ __pa(comm->init_shared_buffer) + PAGE_SIZE);
+
+ protocol_version = ((struct tf_init_buffer *)
+ (comm->init_shared_buffer))->protocol_version;
+
+ if ((GET_PROTOCOL_MAJOR_VERSION(protocol_version))
+ != TF_S_PROTOCOL_MAJOR_VERSION) {
+ dprintk(KERN_ERR "SMC: Unsupported SMC Protocol PA Major "
+ "Version (0x%02x, expected 0x%02x)!\n",
+ GET_PROTOCOL_MAJOR_VERSION(protocol_version),
+ TF_S_PROTOCOL_MAJOR_VERSION);
+ rpc_error = RPC_ERROR_CONNECTION_PROTOCOL;
+ } else {
+ rpc_error = RPC_SUCCESS;
+ }
+
+ spin_unlock(&(comm->lock));
+
+ register_smc_public_crypto_digest();
+ register_smc_public_crypto_aes();
+
+ return rpc_error;
+}
+
+static u32 tf_rpc_trace(struct tf_comm *comm)
+{
+ dprintk(KERN_INFO "tf_rpc_trace(%p)\n", comm);
+
+#ifdef CONFIG_SECURE_TRACE
+ spin_lock(&(comm->lock));
+ printk(KERN_INFO "SMC PA: %s",
+ comm->pBuffer->rpc_trace_buffer);
+ spin_unlock(&(comm->lock));
+#endif
+ return RPC_SUCCESS;
+}
+
+/*
+ * Handles RPC calls
+ *
+ * Returns:
+ * - RPC_NO if there was no RPC to execute
+ * - RPC_YIELD if there was a Yield RPC
+ * - RPC_NON_YIELD if there was a non-Yield RPC
+ */
+
+int tf_rpc_execute(struct tf_comm *comm)
+{
+ u32 rpc_command;
+ u32 rpc_error = RPC_NO;
+
+#ifdef DEBUG
+ BUG_ON((read_mpidr() & 0x00000003) != 0);
+#endif
+
+ /* Lock the RPC */
+ mutex_lock(&(comm->rpc_mutex));
+
+ rpc_command = g_RPC_parameters[1];
+
+ if (g_RPC_advancement == RPC_ADVANCEMENT_PENDING) {
+ dprintk(KERN_INFO "tf_rpc_execute: "
+ "Executing CMD=0x%x\n",
+ g_RPC_parameters[1]);
+
+ switch (rpc_command) {
+ case RPC_CMD_YIELD:
+ dprintk(KERN_INFO "tf_rpc_execute: "
+ "RPC_CMD_YIELD\n");
+
+ rpc_error = RPC_YIELD;
+ g_RPC_parameters[0] = RPC_SUCCESS;
+ break;
+
+ case RPC_CMD_TRACE:
+ rpc_error = RPC_NON_YIELD;
+ g_RPC_parameters[0] = tf_rpc_trace(comm);
+ break;
+
+ default:
+ if (tf_crypto_execute_rpc(rpc_command,
+ comm->pBuffer->rpc_cus_buffer) != 0)
+ g_RPC_parameters[0] = RPC_ERROR_BAD_PARAMETERS;
+ else
+ g_RPC_parameters[0] = RPC_SUCCESS;
+ rpc_error = RPC_NON_YIELD;
+ break;
+ }
+ g_RPC_advancement = RPC_ADVANCEMENT_FINISHED;
+ }
+
+ mutex_unlock(&(comm->rpc_mutex));
+
+ dprintk(KERN_INFO "tf_rpc_execute: Return 0x%x\n",
+ rpc_error);
+
+ return rpc_error;
+}
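+
+/*
+ * Sketch of the resulting public-world loop (illustrative, not a
+ * function of this file): service any pending RPC, then yield back
+ * to the secure world, until no more RPCs are raised:
+ *
+ * do {
+ *	rpc = tf_rpc_execute(comm);
+ *	ret = tf_schedule_secure_world(comm, false);
+ * } while (ret == 0 && rpc != RPC_NO);
+ */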
+
+/*--------------------------------------------------------------------------
+ * L4 SEC Clock domain handling
+ *-------------------------------------------------------------------------- */
+
+void tf_l4sec_clkdm_wakeup(bool wakelock)
+{
+ spin_lock(&tf_get_device()->sm.lock);
+#ifdef CONFIG_HAS_WAKELOCK
+ if (wakelock) {
+ tf_wake_lock_count++;
+ wake_lock(&g_tf_wake_lock);
+ }
+#endif
+ smc_l4_sec_clkdm_use_count++;
+ clkdm_wakeup(smc_l4_sec_clkdm);
+ spin_unlock(&tf_get_device()->sm.lock);
+}
+
+void tf_l4sec_clkdm_allow_idle(bool wakeunlock)
+{
+	spin_lock(&tf_get_device()->sm.lock);
+ smc_l4_sec_clkdm_use_count--;
+ if (smc_l4_sec_clkdm_use_count == 0)
+ clkdm_allow_idle(smc_l4_sec_clkdm);
+#ifdef CONFIG_HAS_WAKELOCK
+	if (wakeunlock) {
+ tf_wake_lock_count--;
+ if (tf_wake_lock_count == 0)
+ wake_unlock(&g_tf_wake_lock);
+ }
+#endif
+ spin_unlock(&tf_get_device()->sm.lock);
+}
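+
+/*
+ * Note: wakeup/allow_idle calls must always be paired, with matching
+ * wakelock flags: tf_start() below uses the (true, true) pairing
+ * around the whole PA load, while omap4_secure_dispatcher() uses
+ * (false, false) around each individual SMC.
+ */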
+
+/*--------------------------------------------------------------------------
+ * Power management
+ *-------------------------------------------------------------------------- */
+/*
+ * Perform a Secure World shutdown operation.
+ * The routine does not return if the operation succeeds.
+ * The routine returns an appropriate error code if
+ * the operation fails.
+ */
+int tf_pm_shutdown(struct tf_comm *comm)
+{
+
+ int error;
+ union tf_command command;
+ union tf_answer answer;
+
+ dprintk(KERN_INFO "tf_pm_shutdown()\n");
+
+ memset(&command, 0, sizeof(command));
+
+ command.header.message_type = TF_MESSAGE_TYPE_MANAGEMENT;
+ command.header.message_size =
+ (sizeof(struct tf_command_management) -
+ sizeof(struct tf_command_header))/sizeof(u32);
+
+ command.management.command = TF_MANAGEMENT_SHUTDOWN;
+
+ error = tf_send_receive(
+ comm,
+ &command,
+ &answer,
+ NULL,
+ false);
+
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_pm_shutdown(): "
+ "tf_send_receive failed (error %d)!\n",
+ error);
+ return error;
+ }
+
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+ if (answer.header.error_code != 0)
+ dprintk(KERN_ERR "tf_driver: shutdown failed.\n");
+ else
+ dprintk(KERN_INFO "tf_driver: shutdown succeeded.\n");
+#endif
+
+ return answer.header.error_code;
+}
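+
+/*
+ * Note on message_size (illustrative arithmetic): sizes are counted
+ * in 32-bit words past the common header. If, for instance,
+ * sizeof(struct tf_command_management) were 32 bytes and
+ * sizeof(struct tf_command_header) 16 bytes, message_size would be
+ * (32 - 16) / sizeof(u32) == 4.
+ */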
+
+
+int tf_pm_hibernate(struct tf_comm *comm)
+{
+ struct tf_device *dev = tf_get_device();
+
+ dprintk(KERN_INFO "tf_pm_hibernate()\n");
+
+ /*
+ * As we enter in CORE OFF, the keys are going to be cleared.
+ * Reset the global key context.
+ * When the system leaves CORE OFF, this will force the driver to go
+ * through the secure world which will reconfigure the accelerators.
+ */
+ dev->aes1_key_context = 0;
+ dev->des_key_context = 0;
+#ifndef CONFIG_SMC_KERNEL_CRYPTO
+ dev->sham1_is_public = false;
+#endif
+ return 0;
+}
+
+#ifdef CONFIG_SMC_KERNEL_CRYPTO
+#define DELAYED_RESUME_NONE 0
+#define DELAYED_RESUME_PENDING 1
+#define DELAYED_RESUME_ONGOING 2
+
+static DEFINE_SPINLOCK(tf_delayed_resume_lock);
+static int tf_need_delayed_resume = DELAYED_RESUME_NONE;
+
+int tf_delayed_secure_resume(void)
+{
+ int ret;
+ union tf_command message;
+ union tf_answer answer;
+ struct tf_device *dev = tf_get_device();
+
+ spin_lock(&tf_delayed_resume_lock);
+ if (likely(tf_need_delayed_resume == DELAYED_RESUME_NONE)) {
+ spin_unlock(&tf_delayed_resume_lock);
+ return 0;
+ }
+
+ if (unlikely(tf_need_delayed_resume == DELAYED_RESUME_ONGOING)) {
+ spin_unlock(&tf_delayed_resume_lock);
+
+ /*
+ * Wait for the other caller to actually finish the delayed
+ * resume operation
+ */
+ while (tf_need_delayed_resume != DELAYED_RESUME_NONE)
+ cpu_relax();
+
+ return 0;
+ }
+
+ tf_need_delayed_resume = DELAYED_RESUME_ONGOING;
+ spin_unlock(&tf_delayed_resume_lock);
+
+ /*
+ * When the system leaves CORE OFF, HWA are configured as secure. We
+ * need them as public for the Linux Crypto API.
+ */
+ memset(&message, 0, sizeof(message));
+
+ message.header.message_type = TF_MESSAGE_TYPE_MANAGEMENT;
+ message.header.message_size =
+ (sizeof(struct tf_command_management) -
+ sizeof(struct tf_command_header))/sizeof(u32);
+ message.management.command =
+ TF_MANAGEMENT_RESUME_FROM_CORE_OFF;
+
+ ret = tf_send_receive(&dev->sm, &message, &answer, NULL, false);
+ if (ret) {
+ printk(KERN_ERR "tf_pm_resume(%p): "
+ "tf_send_receive failed (error %d)!\n",
+ &dev->sm, ret);
+
+ unregister_smc_public_crypto_digest();
+ unregister_smc_public_crypto_aes();
+ return ret;
+ }
+
+ if (answer.header.error_code) {
+ unregister_smc_public_crypto_digest();
+ unregister_smc_public_crypto_aes();
+ }
+
+ spin_lock(&tf_delayed_resume_lock);
+ tf_need_delayed_resume = DELAYED_RESUME_NONE;
+ spin_unlock(&tf_delayed_resume_lock);
+
+ return answer.header.error_code;
+}
+#endif
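+
+/*
+ * Sketch of a caller (illustrative; tf_public_crypto_begin and
+ * tf_program_hwa are hypothetical names): a public crypto entry point
+ * performs the delayed resume before programming the accelerators.
+ *
+ * static int tf_public_crypto_begin(void)
+ * {
+ *	int ret = tf_delayed_secure_resume();
+ *
+ *	if (ret != 0)
+ *		return ret;
+ *	return tf_program_hwa();
+ * }
+ *
+ * where tf_program_hwa() stands for the actual accelerator setup.
+ */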
+
+int tf_pm_resume(struct tf_comm *comm)
+{
+
+ dprintk(KERN_INFO "tf_pm_resume()\n");
+ #if 0
+ {
+ void *workspace_va;
+ struct tf_device *dev = tf_get_device();
+ workspace_va = ioremap(dev->workspace_addr,
+ dev->workspace_size);
+ printk(KERN_INFO
+ "Read first word of workspace [0x%x]\n",
+ *(uint32_t *)workspace_va);
+ }
+ #endif
+
+#ifdef CONFIG_SMC_KERNEL_CRYPTO
+ spin_lock(&tf_delayed_resume_lock);
+ tf_need_delayed_resume = DELAYED_RESUME_PENDING;
+ spin_unlock(&tf_delayed_resume_lock);
+#endif
+ return 0;
+}
+
+/*--------------------------------------------------------------------------
+ * Initialization
+ *-------------------------------------------------------------------------- */
+
+int tf_init(struct tf_comm *comm)
+{
+ spin_lock_init(&(comm->lock));
+ comm->flags = 0;
+ comm->pBuffer = NULL;
+ comm->init_shared_buffer = NULL;
+
+ comm->se_initialized = false;
+
+ init_waitqueue_head(&(comm->wait_queue));
+ mutex_init(&(comm->rpc_mutex));
+
+ if (tf_crypto_init() != PUBLIC_CRYPTO_OPERATION_SUCCESS)
+ return -EFAULT;
+
+ if (omap_type() == OMAP2_DEVICE_TYPE_GP) {
+ register_smc_public_crypto_digest();
+ register_smc_public_crypto_aes();
+ }
+
+ return 0;
+}
+
+/* Start the SMC PA */
+int tf_start(struct tf_comm *comm,
+ u32 workspace_addr, u32 workspace_size,
+ u8 *pa_buffer, u32 pa_size,
+ u8 *properties_buffer, u32 properties_length)
+{
+ struct tf_init_buffer *init_shared_buffer = NULL;
+ struct tf_l1_shared_buffer *l1_shared_buffer = NULL;
+ u32 l1_shared_buffer_descr;
+ struct tf_ns_pa_info pa_info;
+ int ret;
+ u32 descr;
+ u32 sdp_backing_store_addr;
+ u32 sdp_bkext_store_addr;
+#ifdef CONFIG_SMP
+ long ret_affinity;
+ cpumask_t saved_cpu_mask;
+ cpumask_t local_cpu_mask = CPU_MASK_NONE;
+
+ /* OMAP4 Secure ROM Code can only be called from CPU0. */
+ cpu_set(0, local_cpu_mask);
+ sched_getaffinity(0, &saved_cpu_mask);
+ ret_affinity = sched_setaffinity(0, &local_cpu_mask);
+ if (ret_affinity != 0)
+ dprintk(KERN_ERR "sched_setaffinity #1 -> 0x%lX", ret_affinity);
+#endif
+
+ tf_l4sec_clkdm_wakeup(true);
+
+ workspace_size -= SZ_1M;
+ sdp_backing_store_addr = workspace_addr + workspace_size;
+ workspace_size -= 0x20000;
+ sdp_bkext_store_addr = workspace_addr + workspace_size;
+
+ /*
+ * Implementation notes:
+ *
+	 * 1/ The PA buffer (pa_buffer) is now owned by this function.
+ * In case of error, it is responsible for releasing the buffer.
+ *
+ * 2/ The PA Info and PA Buffer will be freed through a RPC call
+ * at the beginning of the PA entry in the SE.
+ */
+
+ if (test_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags)) {
+ dprintk(KERN_ERR "tf_start(%p): "
+ "The SMC PA is already started\n", comm);
+
+ ret = -EFAULT;
+ goto error1;
+ }
+
+ if (sizeof(struct tf_l1_shared_buffer) != PAGE_SIZE) {
+ dprintk(KERN_ERR "tf_start(%p): "
+ "The L1 structure size is incorrect!\n", comm);
+ ret = -EFAULT;
+ goto error1;
+ }
+
+ ret = tf_se_init(comm, sdp_backing_store_addr,
+ sdp_bkext_store_addr);
+ if (ret != 0) {
+ dprintk(KERN_ERR "tf_start(%p): "
+ "SE initialization failed\n", comm);
+ goto error1;
+ }
+
+ init_shared_buffer =
+ (struct tf_init_buffer *)
+ internal_get_zeroed_page(GFP_KERNEL);
+ if (init_shared_buffer == NULL) {
+ dprintk(KERN_ERR "tf_start(%p): "
+ "Ouf of memory!\n", comm);
+
+ ret = -ENOMEM;
+ goto error1;
+ }
+ /* Ensure the page is mapped */
+ __set_page_locked(virt_to_page(init_shared_buffer));
+
+ l1_shared_buffer =
+ (struct tf_l1_shared_buffer *)
+ internal_get_zeroed_page(GFP_KERNEL);
+
+ if (l1_shared_buffer == NULL) {
+ dprintk(KERN_ERR "tf_start(%p): "
+ "Ouf of memory!\n", comm);
+
+ ret = -ENOMEM;
+ goto error1;
+ }
+ /* Ensure the page is mapped */
+ __set_page_locked(virt_to_page(l1_shared_buffer));
+
+ dprintk(KERN_INFO "tf_start(%p): "
+ "L0SharedBuffer={0x%08x, 0x%08x}\n", comm,
+ (u32) init_shared_buffer, (u32) __pa(init_shared_buffer));
+ dprintk(KERN_INFO "tf_start(%p): "
+ "L1SharedBuffer={0x%08x, 0x%08x}\n", comm,
+ (u32) l1_shared_buffer, (u32) __pa(l1_shared_buffer));
+
+ descr = tf_get_l2_descriptor_common((u32) l1_shared_buffer,
+ current->mm);
+ l1_shared_buffer_descr = (
+ ((u32) __pa(l1_shared_buffer) & 0xFFFFF000) |
+ (descr & 0xFFF));
+
+ pa_info.certificate = (void *) __pa(pa_buffer);
+ pa_info.parameters = (void *) __pa(init_shared_buffer);
+ pa_info.results = (void *) __pa(init_shared_buffer);
+
+ init_shared_buffer->l1_shared_buffer_descr = l1_shared_buffer_descr;
+
+ init_shared_buffer->backing_store_addr = sdp_backing_store_addr;
+ init_shared_buffer->backext_storage_addr = sdp_bkext_store_addr;
+ init_shared_buffer->workspace_addr = workspace_addr;
+ init_shared_buffer->workspace_size = workspace_size;
+
+ init_shared_buffer->properties_length = properties_length;
+ if (properties_length == 0) {
+ init_shared_buffer->properties_buffer[0] = 0;
+ } else {
+ /* Test for overflow */
+ if ((init_shared_buffer->properties_buffer +
+ properties_length
+ > init_shared_buffer->properties_buffer) &&
+ (properties_length <=
+ init_shared_buffer->properties_length)) {
+ memcpy(init_shared_buffer->properties_buffer,
+ properties_buffer,
+ properties_length);
+ } else {
+ dprintk(KERN_INFO "tf_start(%p): "
+ "Configuration buffer size from userland is "
+ "incorrect(%d, %d)\n",
+ comm, (u32) properties_length,
+ init_shared_buffer->properties_length);
+ ret = -EFAULT;
+ goto error1;
+ }
+ }
+
+ dprintk(KERN_INFO "tf_start(%p): "
+ "System Configuration (%d bytes)\n", comm,
+ init_shared_buffer->properties_length);
+ dprintk(KERN_INFO "tf_start(%p): "
+ "Starting PA (%d bytes)...\n", comm, pa_size);
+
+ /*
+ * Make sure all data is visible to the secure world
+ */
+ dmac_flush_range((void *)init_shared_buffer,
+ (void *)(((u32)init_shared_buffer) + PAGE_SIZE));
+ outer_clean_range(__pa(init_shared_buffer),
+ __pa(init_shared_buffer) + PAGE_SIZE);
+
+ dmac_flush_range((void *)pa_buffer,
+ (void *)(pa_buffer + pa_size));
+ outer_clean_range(__pa(pa_buffer),
+ __pa(pa_buffer) + pa_size);
+
+ dmac_flush_range((void *)&pa_info,
+ (void *)(((u32)&pa_info) + sizeof(struct tf_ns_pa_info)));
+ outer_clean_range(__pa(&pa_info),
+ __pa(&pa_info) + sizeof(struct tf_ns_pa_info));
+ wmb();
+
+ spin_lock(&(comm->lock));
+ comm->init_shared_buffer = init_shared_buffer;
+ comm->pBuffer = l1_shared_buffer;
+ spin_unlock(&(comm->lock));
+ init_shared_buffer = NULL;
+ l1_shared_buffer = NULL;
+
+ /*
+ * Set the OS current time in the L1 shared buffer first. The secure
+	 * world uses it as its boot reference time.
+ */
+ tf_set_current_time(comm);
+
+ /* Workaround for issue #6081 */
+ if ((omap_rev() && 0xFFF000FF) == OMAP443X_CLASS)
+ disable_nonboot_cpus();
+
+ /*
+ * Start the SMC PA
+ */
+ ret = omap4_secure_dispatcher(API_HAL_LM_PALOAD_INDEX,
+ FLAG_IRQ_ENABLE | FLAG_FIQ_ENABLE | FLAG_START_HAL_CRITICAL, 1,
+ __pa(&pa_info), 0, 0, 0);
+ if (ret != API_HAL_RET_VALUE_OK) {
+ printk(KERN_ERR "SMC: Error while loading the PA [0x%x]\n",
+ ret);
+ goto error2;
+ }
+
+ /* Loop until the first S Yield RPC is received */
+loop:
+ mutex_lock(&(comm->rpc_mutex));
+
+ if (g_RPC_advancement == RPC_ADVANCEMENT_PENDING) {
+ dprintk(KERN_INFO "tf_rpc_execute: "
+ "Executing CMD=0x%x\n",
+ g_RPC_parameters[1]);
+
+ switch (g_RPC_parameters[1]) {
+ case RPC_CMD_YIELD:
+ dprintk(KERN_INFO "tf_rpc_execute: "
+ "RPC_CMD_YIELD\n");
+ set_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED,
+ &(comm->flags));
+ g_RPC_parameters[0] = RPC_SUCCESS;
+ break;
+
+ case RPC_CMD_INIT:
+ dprintk(KERN_INFO "tf_rpc_execute: "
+ "RPC_CMD_INIT\n");
+ g_RPC_parameters[0] = tf_rpc_init(comm);
+ break;
+
+ case RPC_CMD_TRACE:
+ g_RPC_parameters[0] = tf_rpc_trace(comm);
+ break;
+
+ default:
+ g_RPC_parameters[0] = RPC_ERROR_BAD_PARAMETERS;
+ break;
+ }
+ g_RPC_advancement = RPC_ADVANCEMENT_FINISHED;
+ }
+
+ mutex_unlock(&(comm->rpc_mutex));
+
+ ret = tf_schedule_secure_world(comm, false);
+ if (ret != 0) {
+ printk(KERN_ERR "SMC: Error while loading the PA [0x%x]\n",
+ ret);
+ goto error2;
+ }
+
+ if (!test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags)))
+ goto loop;
+
+ set_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags);
+ wake_up(&(comm->wait_queue));
+ ret = 0;
+
+ #if 0
+ {
+ void *workspace_va;
+ workspace_va = ioremap(workspace_addr, workspace_size);
+ printk(KERN_INFO
+ "Read first word of workspace [0x%x]\n",
+ *(uint32_t *)workspace_va);
+ }
+ #endif
+
+ /* Workaround for issue #6081 */
+ if ((omap_rev() && 0xFFF000FF) == OMAP443X_CLASS)
+ enable_nonboot_cpus();
+
+ goto exit;
+
+error2:
+ /* Workaround for issue #6081 */
+ if ((omap_rev() && 0xFFF000FF) == OMAP443X_CLASS)
+ enable_nonboot_cpus();
+
+ spin_lock(&(comm->lock));
+ l1_shared_buffer = comm->pBuffer;
+ init_shared_buffer = comm->init_shared_buffer;
+ comm->pBuffer = NULL;
+ comm->init_shared_buffer = NULL;
+ spin_unlock(&(comm->lock));
+
+error1:
+ if (init_shared_buffer != NULL) {
+ __clear_page_locked(virt_to_page(init_shared_buffer));
+ internal_free_page((unsigned long) init_shared_buffer);
+ }
+ if (l1_shared_buffer != NULL) {
+ __clear_page_locked(virt_to_page(l1_shared_buffer));
+ internal_free_page((unsigned long) l1_shared_buffer);
+ }
+
+exit:
+#ifdef CONFIG_SMP
+ ret_affinity = sched_setaffinity(0, &saved_cpu_mask);
+ if (ret_affinity != 0)
+ dprintk(KERN_ERR "sched_setaffinity #2 -> 0x%lX", ret_affinity);
+#endif
+
+ tf_l4sec_clkdm_allow_idle(true);
+
+ if (ret > 0)
+ ret = -EFAULT;
+
+ return ret;
+}
+
+void tf_terminate(struct tf_comm *comm)
+{
+ dprintk(KERN_INFO "tf_terminate(%p)\n", comm);
+
+ spin_lock(&(comm->lock));
+
+ tf_crypto_terminate();
+
+ spin_unlock(&(comm->lock));
+}
diff --git a/security/smc/tf_conn.c b/security/smc/tf_conn.c
new file mode 100644
index 0000000..660add6
--- /dev/null
+++ b/security/smc/tf_conn.c
@@ -0,0 +1,1567 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <asm/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/types.h>
+
+#include "s_version.h"
+
+#include "tf_protocol.h"
+#include "tf_defs.h"
+#include "tf_util.h"
+#include "tf_comm.h"
+#include "tf_conn.h"
+
+#ifdef CONFIG_TF_ZEBRA
+#include "tf_crypto.h"
+#endif
+
+/*----------------------------------------------------------------------------
+ * Management of the shared memory blocks.
+ *
+ * Shared memory blocks are the blocks registered through
+ * the commands REGISTER_SHARED_MEMORY and POWER_MANAGEMENT
+ *----------------------------------------------------------------------------*/
+
+/**
+ * Unmaps a shared memory
+ **/
+static void tf_unmap_shmem(
+ struct tf_connection *connection,
+ struct tf_shmem_desc *shmem_desc,
+ u32 full_cleanup)
+{
+ /* check shmem_desc contains a descriptor */
+ if (shmem_desc == NULL)
+ return;
+
+ dprintk(KERN_DEBUG "tf_unmap_shmem(%p)\n", shmem_desc);
+
+retry:
+ mutex_lock(&(connection->shmem_mutex));
+ if (atomic_read(&shmem_desc->ref_count) > 1) {
+ /*
+		 * The shared memory is still in use; wait for the other
+		 * operations to complete before actually unmapping it.
+ */
+ dprintk(KERN_INFO "Descriptor in use\n");
+ mutex_unlock(&(connection->shmem_mutex));
+ schedule();
+ goto retry;
+ }
+
+ tf_cleanup_shared_memory(
+ &(connection->cpt_alloc_context),
+ shmem_desc,
+ full_cleanup);
+
+ list_del(&(shmem_desc->list));
+
+ if ((shmem_desc->type == TF_SHMEM_TYPE_REGISTERED_SHMEM) ||
+ (full_cleanup != 0)) {
+ internal_kfree(shmem_desc);
+
+ atomic_dec(&(connection->shmem_count));
+ } else {
+ /*
+		 * This is a preallocated shared memory; add it to the free
+		 * list. Since the device context is unmapped last, it is
+		 * always the first element of the free list if no
+		 * device context has been created.
+ */
+ shmem_desc->block_identifier = 0;
+ list_add(&(shmem_desc->list), &(connection->free_shmem_list));
+ }
+
+ mutex_unlock(&(connection->shmem_mutex));
+}
+
+
+/**
+ * Find the first available slot for a new block of shared memory
+ * and map the user buffer.
+ * Update the descriptors to L1 descriptors
+ * Update the buffer_start_offset and buffer_size fields
+ * shmem_desc is updated to the mapped shared memory descriptor
+ **/
+static int tf_map_shmem(
+ struct tf_connection *connection,
+ u32 buffer,
+ /* flags for read-write access rights on the memory */
+ u32 flags,
+ bool in_user_space,
+ u32 descriptors[TF_MAX_COARSE_PAGES],
+ u32 *buffer_start_offset,
+ u32 buffer_size,
+ struct tf_shmem_desc **shmem_desc,
+ u32 *descriptor_count)
+{
+ struct tf_shmem_desc *desc = NULL;
+ int error;
+
+ dprintk(KERN_INFO "tf_map_shmem(%p, %p, flags = 0x%08x)\n",
+ connection,
+ (void *) buffer,
+ flags);
+
+ mutex_lock(&(connection->shmem_mutex));
+
+ /*
+	 * Check that the list of free shared memory
+	 * descriptors is not empty
+ */
+ if (list_empty(&(connection->free_shmem_list))) {
+ if (atomic_read(&(connection->shmem_count)) ==
+ TF_SHMEM_MAX_COUNT) {
+ printk(KERN_ERR "tf_map_shmem(%p):"
+ " maximum shared memories already registered\n",
+ connection);
+ error = -ENOMEM;
+ goto error;
+ }
+
+ /* no descriptor available, allocate a new one */
+
+ desc = (struct tf_shmem_desc *) internal_kmalloc(
+ sizeof(*desc), GFP_KERNEL);
+ if (desc == NULL) {
+ printk(KERN_ERR "tf_map_shmem(%p):"
+ " failed to allocate descriptor\n",
+ connection);
+ error = -ENOMEM;
+ goto error;
+ }
+
+ /* Initialize the structure */
+ desc->type = TF_SHMEM_TYPE_REGISTERED_SHMEM;
+ atomic_set(&desc->ref_count, 1);
+ INIT_LIST_HEAD(&(desc->list));
+
+ atomic_inc(&(connection->shmem_count));
+ } else {
+ /* take the first free shared memory descriptor */
+ desc = list_first_entry(&(connection->free_shmem_list),
+ struct tf_shmem_desc, list);
+ list_del(&(desc->list));
+ }
+
+ /* Add the descriptor to the used list */
+ list_add(&(desc->list), &(connection->used_shmem_list));
+
+ error = tf_fill_descriptor_table(
+ &(connection->cpt_alloc_context),
+ desc,
+ buffer,
+ connection->vmas,
+ descriptors,
+ buffer_size,
+ buffer_start_offset,
+ in_user_space,
+ flags,
+ descriptor_count);
+
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_map_shmem(%p):"
+ " tf_fill_descriptor_table failed with error "
+ "code %d!\n",
+ connection,
+ error);
+ goto error;
+ }
+ desc->pBuffer = (u8 *) buffer;
+
+ /*
+ * Successful completion.
+ */
+ *shmem_desc = desc;
+ mutex_unlock(&(connection->shmem_mutex));
+ dprintk(KERN_DEBUG "tf_map_shmem: success\n");
+ return 0;
+
+
+ /*
+ * Error handling.
+ */
+error:
+ mutex_unlock(&(connection->shmem_mutex));
+ dprintk(KERN_ERR "tf_map_shmem: failure with error code %d\n",
+ error);
+
+ tf_unmap_shmem(
+ connection,
+ desc,
+ 0);
+
+ return error;
+}
+
+
+
+/* This function is a copy of the find_vma() function
+from Linux kernel 2.6.15, with some fixes:
+ - the memory block may end on vm_end
+ - check that the full memory block is inside the memory area
+ - guarantee NULL is returned if no memory area is found */
+struct vm_area_struct *tf_find_vma(struct mm_struct *mm,
+ unsigned long addr, unsigned long size)
+{
+ struct vm_area_struct *vma = NULL;
+
+ dprintk(KERN_INFO
+ "tf_find_vma addr=0x%lX size=0x%lX\n", addr, size);
+
+ if (mm) {
+ /* Check the cache first. */
+ /* (Cache hit rate is typically around 35%.) */
+ vma = mm->mmap_cache;
+ if (!(vma && vma->vm_end >= (addr+size) &&
+ vma->vm_start <= addr)) {
+ struct rb_node *rb_node;
+
+ rb_node = mm->mm_rb.rb_node;
+ vma = NULL;
+
+ while (rb_node) {
+ struct vm_area_struct *vma_tmp;
+
+ vma_tmp = rb_entry(rb_node,
+ struct vm_area_struct, vm_rb);
+
+ dprintk(KERN_INFO
+ "vma_tmp->vm_start=0x%lX"
+ "vma_tmp->vm_end=0x%lX\n",
+ vma_tmp->vm_start,
+ vma_tmp->vm_end);
+
+ if (vma_tmp->vm_end >= (addr+size)) {
+ vma = vma_tmp;
+ if (vma_tmp->vm_start <= addr)
+ break;
+
+ rb_node = rb_node->rb_left;
+ } else {
+ rb_node = rb_node->rb_right;
+ }
+ }
+
+ if (vma)
+ mm->mmap_cache = vma;
+ if (rb_node == NULL)
+ vma = NULL;
+ }
+ }
+ return vma;
+}
+
+static int tf_validate_shmem_and_flags(
+ u32 shmem,
+ u32 shmem_size,
+ u32 flags)
+{
+ struct vm_area_struct *vma;
+ u32 chunk;
+
+ if (shmem_size == 0)
+ /* This is always valid */
+ return 0;
+
+ if ((shmem + shmem_size) < shmem)
+ /* Overflow */
+ return -EINVAL;
+
+ down_read(&current->mm->mmap_sem);
+
+ /*
+ * When looking for a memory address, split buffer into chunks of
+ * size=PAGE_SIZE.
+ */
+ chunk = PAGE_SIZE - (shmem & (PAGE_SIZE-1));
+ if (chunk > shmem_size)
+ chunk = shmem_size;
+
+ do {
+ vma = tf_find_vma(current->mm, shmem, chunk);
+
+ if (vma == NULL) {
+ dprintk(KERN_ERR "%s: area not found\n", __func__);
+ goto error;
+ }
+
+ if (flags & TF_SHMEM_TYPE_READ)
+ if (!(vma->vm_flags & VM_READ)) {
+ dprintk(KERN_ERR "%s: no read permission\n",
+ __func__);
+ goto error;
+ }
+ if (flags & TF_SHMEM_TYPE_WRITE)
+ if (!(vma->vm_flags & VM_WRITE)) {
+ dprintk(KERN_ERR "%s: no write permission\n",
+ __func__);
+ goto error;
+ }
+
+ shmem_size -= chunk;
+ shmem += chunk;
+ chunk = (shmem_size <= PAGE_SIZE ?
+ shmem_size : PAGE_SIZE);
+ } while (shmem_size != 0);
+
+ up_read(&current->mm->mmap_sem);
+ return 0;
+
+error:
+ up_read(&current->mm->mmap_sem);
+ return -EFAULT;
+}
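+
+/*
+ * Worked example (illustrative): for shmem = 0x40000F00 and
+ * shmem_size = 0x1200 with 4 KB pages, the chunks checked are 0x100
+ * bytes (up to the first page boundary), then a full 0x1000-byte
+ * page, then the remaining 0x100 bytes; each chunk is validated
+ * against its own VMA.
+ */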
+
+
+static int tf_map_temp_shmem(struct tf_connection *connection,
+ struct tf_command_param_temp_memref *temp_memref,
+ u32 param_type,
+ struct tf_shmem_desc **shmem_desc)
+{
+ u32 flags;
+ u32 error = S_SUCCESS;
+ bool in_user_space = connection->owner != TF_CONNECTION_OWNER_KERNEL;
+
+ dprintk(KERN_INFO "tf_map_temp_shmem(%p, "
+ "0x%08x[size=0x%08x], offset=0x%08x)\n",
+ connection,
+ temp_memref->descriptor,
+ temp_memref->size,
+ temp_memref->offset);
+
+ switch (param_type) {
+ case TF_PARAM_TYPE_MEMREF_TEMP_INPUT:
+ flags = TF_SHMEM_TYPE_READ;
+ break;
+ case TF_PARAM_TYPE_MEMREF_TEMP_OUTPUT:
+ flags = TF_SHMEM_TYPE_WRITE;
+ break;
+ case TF_PARAM_TYPE_MEMREF_TEMP_INOUT:
+ flags = TF_SHMEM_TYPE_WRITE | TF_SHMEM_TYPE_READ;
+ break;
+ default:
+ error = -EINVAL;
+ goto error;
+ }
+
+ if (temp_memref->descriptor == 0) {
+ /* NULL tmpref */
+ temp_memref->offset = 0;
+ *shmem_desc = NULL;
+ } else if ((temp_memref->descriptor != 0) &&
+ (temp_memref->size == 0)) {
+ /* Empty tmpref */
+ temp_memref->offset = temp_memref->descriptor;
+ temp_memref->descriptor = 0;
+ temp_memref->size = 0;
+ *shmem_desc = NULL;
+ } else {
+ /* Map the temp shmem block */
+
+ u32 shared_mem_descriptors[TF_MAX_COARSE_PAGES];
+ u32 descriptorCount;
+
+ if (in_user_space) {
+ error = tf_validate_shmem_and_flags(
+ temp_memref->descriptor,
+ temp_memref->size,
+ flags);
+ if (error != 0)
+ goto error;
+ }
+
+ error = tf_map_shmem(
+ connection,
+ temp_memref->descriptor,
+ flags,
+ in_user_space,
+ shared_mem_descriptors,
+ &(temp_memref->offset),
+ temp_memref->size,
+ shmem_desc,
+ &descriptorCount);
+ temp_memref->descriptor = shared_mem_descriptors[0];
+ }
+
+error:
+ return error;
+}
+
+/*
+ * Clean up a list of shared memory descriptors.
+ */
+static void tf_shared_memory_cleanup_list(
+ struct tf_connection *connection,
+ struct list_head *shmem_desc_list)
+{
+ while (!list_empty(shmem_desc_list)) {
+ struct tf_shmem_desc *shmem_desc;
+
+ shmem_desc = list_first_entry(shmem_desc_list,
+ struct tf_shmem_desc, list);
+
+ tf_unmap_shmem(connection, shmem_desc, 1);
+ }
+}
+
+
+/*
+ * Clean up the shared memory information in the connection.
+ * Releases all allocated pages.
+ */
+static void tf_cleanup_shared_memories(struct tf_connection *connection)
+{
+	/* Clean up the lists of used and free descriptors.
+	 * This is done outside the mutex, because tf_unmap_shmem
+	 * takes the mutex itself.
+	 */
+ tf_shared_memory_cleanup_list(connection,
+ &connection->used_shmem_list);
+ tf_shared_memory_cleanup_list(connection,
+ &connection->free_shmem_list);
+
+ mutex_lock(&(connection->shmem_mutex));
+
+ /* Free the Vmas page */
+ if (connection->vmas) {
+ internal_free_page((unsigned long) connection->vmas);
+ connection->vmas = NULL;
+ }
+
+ tf_release_coarse_page_table_allocator(
+ &(connection->cpt_alloc_context));
+
+ mutex_unlock(&(connection->shmem_mutex));
+}
+
+
+/*
+ * Initialize the shared memory in a connection.
+ * Allocates the minimum memory to be provided
+ * for shared memory management
+ */
+int tf_init_shared_memory(struct tf_connection *connection)
+{
+ int error;
+ int i;
+ int coarse_page_index;
+
+ /*
+ * We only need to initialize special elements and attempt to allocate
+ * the minimum shared memory descriptors we want to support
+ */
+
+ mutex_init(&(connection->shmem_mutex));
+ INIT_LIST_HEAD(&(connection->free_shmem_list));
+ INIT_LIST_HEAD(&(connection->used_shmem_list));
+ atomic_set(&(connection->shmem_count), 0);
+
+ tf_init_coarse_page_table_allocator(
+ &(connection->cpt_alloc_context));
+
+
+ /*
+ * Preallocate 3 pages to increase the chances that a connection
+ * succeeds in allocating shared mem
+ */
+	for (i = 0; i < 3; i++) {
+ struct tf_shmem_desc *shmem_desc =
+ (struct tf_shmem_desc *) internal_kmalloc(
+ sizeof(*shmem_desc), GFP_KERNEL);
+
+ if (shmem_desc == NULL) {
+ printk(KERN_ERR "tf_init_shared_memory(%p):"
+ " failed to pre allocate descriptor %d\n",
+ connection,
+ i);
+ error = -ENOMEM;
+ goto error;
+ }
+
+ for (coarse_page_index = 0;
+ coarse_page_index < TF_MAX_COARSE_PAGES;
+ coarse_page_index++) {
+ struct tf_coarse_page_table *coarse_pg_table;
+
+ coarse_pg_table = tf_alloc_coarse_page_table(
+ &(connection->cpt_alloc_context),
+ TF_PAGE_DESCRIPTOR_TYPE_PREALLOCATED);
+
+ if (coarse_pg_table == NULL) {
+ printk(KERN_ERR "tf_init_shared_memory(%p)"
+ ": descriptor %d coarse page %d - "
+ "tf_alloc_coarse_page_table() "
+ "failed\n",
+ connection,
+ i,
+ coarse_page_index);
+ error = -ENOMEM;
+ goto error;
+ }
+
+ shmem_desc->coarse_pg_table[coarse_page_index] =
+ coarse_pg_table;
+ }
+ shmem_desc->coarse_pg_table_count = 0;
+
+ shmem_desc->type = TF_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM;
+ atomic_set(&shmem_desc->ref_count, 1);
+
+ /*
+		 * Add this preallocated descriptor to the list of free
+		 * descriptors. Keep the device-context-specific one at
+		 * the beginning of the list.
+ */
+ INIT_LIST_HEAD(&(shmem_desc->list));
+ list_add_tail(&(shmem_desc->list),
+ &(connection->free_shmem_list));
+ }
+
+ /* allocate memory for the vmas structure */
+ connection->vmas =
+ (struct vm_area_struct **) internal_get_zeroed_page(GFP_KERNEL);
+ if (connection->vmas == NULL) {
+ printk(KERN_ERR "tf_init_shared_memory(%p):"
+ " vmas - failed to get_zeroed_page\n",
+ connection);
+ error = -ENOMEM;
+ goto error;
+ }
+
+ return 0;
+
+error:
+ tf_cleanup_shared_memories(connection);
+ return error;
+}
+
+/*----------------------------------------------------------------------------
+ * Connection operations to the Secure World
+ *----------------------------------------------------------------------------*/
+
+int tf_create_device_context(
+ struct tf_connection *connection)
+{
+ union tf_command command;
+ union tf_answer answer;
+ int error = 0;
+
+ dprintk(KERN_INFO "tf_create_device_context(%p)\n",
+ connection);
+
+ command.create_device_context.message_type =
+ TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT;
+ command.create_device_context.message_size =
+ (sizeof(struct tf_command_create_device_context)
+ - sizeof(struct tf_command_header))/sizeof(u32);
+ command.create_device_context.operation_id = (u32) &answer;
+ command.create_device_context.device_context_id = (u32) connection;
+
+ error = tf_send_receive(
+ &connection->dev->sm,
+ &command,
+ &answer,
+ connection,
+ true);
+
+ if ((error != 0) ||
+ (answer.create_device_context.error_code != S_SUCCESS))
+ goto error;
+
+ /*
+ * CREATE_DEVICE_CONTEXT succeeded,
+ * store device context handler and update connection status
+ */
+ connection->device_context =
+ answer.create_device_context.device_context;
+ spin_lock(&(connection->state_lock));
+ connection->state = TF_CONN_STATE_VALID_DEVICE_CONTEXT;
+ spin_unlock(&(connection->state_lock));
+
+ /* successful completion */
+ dprintk(KERN_INFO "tf_create_device_context(%p):"
+ " device_context=0x%08x\n",
+ connection,
+ answer.create_device_context.device_context);
+ return 0;
+
+error:
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_create_device_context failed with "
+ "error %d\n", error);
+ } else {
+ /*
+ * We sent a DeviceCreateContext. The state is now
+		 * TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT. It has to be
+		 * reset if we ever want to send a DeviceCreateContext again.
+ */
+ spin_lock(&(connection->state_lock));
+ connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
+ spin_unlock(&(connection->state_lock));
+ dprintk(KERN_ERR "tf_create_device_context failed with "
+ "error_code 0x%08X\n",
+ answer.create_device_context.error_code);
+ if (answer.create_device_context.error_code ==
+ S_ERROR_OUT_OF_MEMORY)
+ error = -ENOMEM;
+ else
+ error = -EFAULT;
+ }
+
+ return error;
+}
+
+/* Check that the current application belongs to the
+ * requested GID */
+static bool tf_check_gid(gid_t requested_gid)
+{
+ if (requested_gid == current_egid()) {
+ return true;
+ } else {
+ u32 size;
+ u32 i;
+ /* Look in the supplementary GIDs */
+ get_group_info(GROUP_INFO);
+ size = GROUP_INFO->ngroups;
+ for (i = 0; i < size; i++)
+			if (requested_gid == GROUP_AT(GROUP_INFO, i))
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Opens a client session to the Secure World
+ */
+int tf_open_client_session(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer)
+{
+ int error = 0;
+ struct tf_shmem_desc *shmem_desc[4] = {NULL};
+ u32 i;
+
+ dprintk(KERN_INFO "tf_open_client_session(%p)\n", connection);
+
+ /*
+	 * Initialize the message size with no login data. This will be
+	 * adjusted later in the cases below.
+ */
+ command->open_client_session.message_size =
+ (sizeof(struct tf_command_open_client_session) - 20
+ - sizeof(struct tf_command_header))/4;
+
+ switch (command->open_client_session.login_type) {
+ case TF_LOGIN_PUBLIC:
+ /* Nothing to do */
+ break;
+
+ case TF_LOGIN_USER:
+ /*
+ * Send the EUID of the calling application in the login data.
+ * Update message size.
+ */
+ *(u32 *) &command->open_client_session.login_data =
+ current_euid();
+#ifndef CONFIG_ANDROID
+ command->open_client_session.login_type =
+ (u32) TF_LOGIN_USER_LINUX_EUID;
+#else
+ command->open_client_session.login_type =
+ (u32) TF_LOGIN_USER_ANDROID_EUID;
+#endif
+
+ /* Added one word */
+ command->open_client_session.message_size += 1;
+ break;
+
+ case TF_LOGIN_GROUP: {
+ /* Check requested GID */
+ gid_t requested_gid =
+ *(u32 *) command->open_client_session.login_data;
+
+ if (!tf_check_gid(requested_gid)) {
+ dprintk(KERN_ERR "tf_open_client_session(%p) "
+ "TF_LOGIN_GROUP: requested GID (0x%x) does "
+ "not match real eGID (0x%x)"
+ "or any of the supplementary GIDs\n",
+ connection, requested_gid, current_egid());
+ error = -EACCES;
+ goto error;
+ }
+#ifndef CONFIG_ANDROID
+ command->open_client_session.login_type =
+ TF_LOGIN_GROUP_LINUX_GID;
+#else
+ command->open_client_session.login_type =
+ TF_LOGIN_GROUP_ANDROID_GID;
+#endif
+
+ command->open_client_session.message_size += 1; /* GID */
+ break;
+ }
+
+#ifndef CONFIG_ANDROID
+ case TF_LOGIN_APPLICATION: {
+ /*
+ * Compute SHA-1 hash of the application fully-qualified path
+ * name. Truncate the hash to 16 bytes and send it as login
+ * data. Update message size.
+ */
+ u8 pSHA1Hash[SHA1_DIGEST_SIZE];
+
+ error = tf_hash_application_path_and_data(pSHA1Hash,
+ NULL, 0);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_open_client_session: "
+ "error in tf_hash_application_path_and_data\n");
+ goto error;
+ }
+ memcpy(&command->open_client_session.login_data,
+ pSHA1Hash, 16);
+ command->open_client_session.login_type =
+ TF_LOGIN_APPLICATION_LINUX_PATH_SHA1_HASH;
+ /* 16 bytes */
+ command->open_client_session.message_size += 4;
+ break;
+ }
+#else
+ case TF_LOGIN_APPLICATION:
+ /*
+ * Send the real UID of the calling application in the login
+ * data. Update message size.
+ */
+ *(u32 *) &command->open_client_session.login_data =
+ current_uid();
+
+ command->open_client_session.login_type =
+ (u32) TF_LOGIN_APPLICATION_ANDROID_UID;
+
+ /* Added one word */
+ command->open_client_session.message_size += 1;
+ break;
+#endif
+
+#ifndef CONFIG_ANDROID
+ case TF_LOGIN_APPLICATION_USER: {
+ /*
+ * Compute SHA-1 hash of the concatenation of the application
+ * fully-qualified path name and the EUID of the calling
+ * application. Truncate the hash to 16 bytes and send it as
+ * login data. Update message size.
+ */
+ u8 pSHA1Hash[SHA1_DIGEST_SIZE];
+
+ error = tf_hash_application_path_and_data(pSHA1Hash,
+ (u8 *) &(current_euid()), sizeof(current_euid()));
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_open_client_session: "
+ "error in tf_hash_application_path_and_data\n");
+ goto error;
+ }
+ memcpy(&command->open_client_session.login_data,
+ pSHA1Hash, 16);
+ command->open_client_session.login_type =
+ TF_LOGIN_APPLICATION_USER_LINUX_PATH_EUID_SHA1_HASH;
+
+ /* 16 bytes */
+ command->open_client_session.message_size += 4;
+
+ break;
+ }
+#else
+ case TF_LOGIN_APPLICATION_USER:
+ /*
+ * Send the real UID and the EUID of the calling application in
+ * the login data. Update message size.
+ */
+ *(u32 *) &command->open_client_session.login_data =
+ current_uid();
+ *(u32 *) &command->open_client_session.login_data[4] =
+ current_euid();
+
+ command->open_client_session.login_type =
+ TF_LOGIN_APPLICATION_USER_ANDROID_UID_EUID;
+
+ /* Added two words */
+ command->open_client_session.message_size += 2;
+ break;
+#endif
+
+#ifndef CONFIG_ANDROID
+ case TF_LOGIN_APPLICATION_GROUP: {
+ /*
+ * Check requested GID. Compute SHA-1 hash of the concatenation
+ * of the application fully-qualified path name and the
+ * requested GID. Update message size
+ */
+ gid_t requested_gid;
+ u8 pSHA1Hash[SHA1_DIGEST_SIZE];
+
+ requested_gid = *(u32 *) &command->open_client_session.
+ login_data;
+
+ if (!tf_check_gid(requested_gid)) {
+ dprintk(KERN_ERR "tf_open_client_session(%p) "
+ "TF_LOGIN_APPLICATION_GROUP: requested GID (0x%x) "
+ "does not match real eGID (0x%x)"
+ "or any of the supplementary GIDs\n",
+ connection, requested_gid, current_egid());
+ error = -EACCES;
+ goto error;
+ }
+
+ error = tf_hash_application_path_and_data(pSHA1Hash,
+ &requested_gid, sizeof(u32));
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_open_client_session: "
+ "error in tf_hash_application_path_and_data\n");
+ goto error;
+ }
+
+ memcpy(&command->open_client_session.login_data,
+ pSHA1Hash, 16);
+ command->open_client_session.login_type =
+ TF_LOGIN_APPLICATION_GROUP_LINUX_PATH_GID_SHA1_HASH;
+
+ /* 16 bytes */
+ command->open_client_session.message_size += 4;
+ break;
+ }
+#else
+ case TF_LOGIN_APPLICATION_GROUP: {
+ /*
+ * Check requested GID. Send the real UID and the requested GID
+ * in the login data. Update message size.
+ */
+ gid_t requested_gid;
+
+ requested_gid = *(u32 *) &command->open_client_session.
+ login_data;
+
+ if (!tf_check_gid(requested_gid)) {
+ dprintk(KERN_ERR "tf_open_client_session(%p) "
+ "TF_LOGIN_APPLICATION_GROUP: requested GID (0x%x) "
+ "does not match real eGID (0x%x)"
+ "or any of the supplementary GIDs\n",
+ connection, requested_gid, current_egid());
+ error = -EACCES;
+ goto error;
+ }
+
+ *(u32 *) &command->open_client_session.login_data =
+ current_uid();
+ *(u32 *) &command->open_client_session.login_data[4] =
+ requested_gid;
+
+ command->open_client_session.login_type =
+ TF_LOGIN_APPLICATION_GROUP_ANDROID_UID_GID;
+
+ /* Added two words */
+ command->open_client_session.message_size += 2;
+
+ break;
+ }
+#endif
+
+ case TF_LOGIN_PRIVILEGED:
+ /* A privileged login may be performed only on behalf of the
+ kernel itself or on behalf of a process with euid=0 or
+ egid=0. */
+ if (connection->owner == TF_CONNECTION_OWNER_KERNEL) {
+ dprintk(KERN_DEBUG "tf_open_client_session: "
+ "TF_LOGIN_PRIVILEGED for kernel API\n");
+ command->open_client_session.login_type =
+ TF_LOGIN_PRIVILEGED_KERNEL;
+ } else if (current_euid() != 0 && current_egid() != 0) {
+ dprintk(KERN_ERR "tf_open_client_session: "
+ " user %d, group %d not allowed to open "
+ "session with TF_LOGIN_PRIVILEGED\n",
+ current_euid(), current_egid());
+ error = -EACCES;
+ goto error;
+ } else {
+ dprintk(KERN_DEBUG "tf_open_client_session: "
+ "TF_LOGIN_PRIVILEGED for %u:%u\n",
+ current_euid(), current_egid());
+ command->open_client_session.login_type =
+ TF_LOGIN_PRIVILEGED;
+ }
+ break;
+
+ case TF_LOGIN_AUTHENTICATION: {
+ /*
+ * Compute SHA-1 hash of the application binary
+ * Send this hash as the login data (20 bytes)
+ */
+
+ u8 *hash;
+ hash = &(command->open_client_session.login_data[0]);
+
+ error = tf_get_current_process_hash(hash);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_open_client_session: "
+ "error in tf_get_current_process_hash\n");
+ goto error;
+ }
+ command->open_client_session.login_type =
+ TF_LOGIN_AUTHENTICATION_BINARY_SHA1_HASH;
+
+ /* 20 bytes */
+ command->open_client_session.message_size += 5;
+ break;
+ }
+
+ case TF_LOGIN_PRIVILEGED_KERNEL:
+ /* A kernel login may be performed only on behalf of the
+ kernel itself. */
+ if (connection->owner == TF_CONNECTION_OWNER_KERNEL) {
+ dprintk(KERN_DEBUG "tf_open_client_session: "
+ "TF_LOGIN_PRIVILEGED_KERNEL for kernel API\n");
+ command->open_client_session.login_type =
+ TF_LOGIN_PRIVILEGED_KERNEL;
+ } else {
+ dprintk(KERN_ERR "tf_open_client_session: "
+			"user %d, group %d not allowed to open "
+ "session with TF_LOGIN_PRIVILEGED_KERNEL\n",
+ current_euid(), current_egid());
+ error = -EACCES;
+ goto error;
+ }
+ break;
+
+ default:
+ dprintk(KERN_ERR "tf_open_client_session: "
+ "unknown login_type(%08X)\n",
+ command->open_client_session.login_type);
+ error = -EOPNOTSUPP;
+ goto error;
+ }
+
+ /* Map the temporary memory references */
+ for (i = 0; i < 4; i++) {
+ int param_type;
+ param_type = TF_GET_PARAM_TYPE(
+ command->open_client_session.param_types, i);
+ if ((param_type & (TF_PARAM_TYPE_MEMREF_FLAG |
+ TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG))
+ == TF_PARAM_TYPE_MEMREF_FLAG) {
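+			/*
+			 * The memref flag is set but the registered flag is
+			 * not: this is a temporary memory reference that must
+			 * be mapped for the duration of the command.
+			 */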
+ /* Map temp mem ref */
+ error = tf_map_temp_shmem(connection,
+ &command->open_client_session.
+ params[i].temp_memref,
+ param_type,
+ &shmem_desc[i]);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_open_client_session: "
+ "unable to map temporary memory block "
+ "(%08X)\n", error);
+ goto error;
+ }
+ }
+ }
+
+ /* Fill the handle of the Device Context */
+ command->open_client_session.device_context =
+ connection->device_context;
+
+ error = tf_send_receive(
+ &connection->dev->sm,
+ command,
+ answer,
+ connection,
+ true);
+
+error:
+ /* Unmap the temporary memory references */
+ for (i = 0; i < 4; i++)
+ if (shmem_desc[i] != NULL)
+ tf_unmap_shmem(connection, shmem_desc[i], 0);
+
+ if (error != 0)
+ dprintk(KERN_ERR "tf_open_client_session returns %d\n",
+ error);
+ else
+ dprintk(KERN_ERR "tf_open_client_session returns "
+ "error_code 0x%08X\n",
+ answer->open_client_session.error_code);
+
+ return error;
+}
+
+
+/*
+ * Closes a client session from the Secure World
+ */
+int tf_close_client_session(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer)
+{
+ int error = 0;
+
+ dprintk(KERN_DEBUG "tf_close_client_session(%p)\n", connection);
+
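+	/*
+	 * message_size is the payload length in 32-bit words, excluding the
+	 * command header, hence the division by 4.
+	 */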
+ command->close_client_session.message_size =
+ (sizeof(struct tf_command_close_client_session) -
+ sizeof(struct tf_command_header)) / 4;
+ command->close_client_session.device_context =
+ connection->device_context;
+
+ error = tf_send_receive(
+ &connection->dev->sm,
+ command,
+ answer,
+ connection,
+ true);
+
+ if (error != 0)
+ dprintk(KERN_ERR "tf_close_client_session returns %d\n",
+ error);
+ else
+ dprintk(KERN_ERR "tf_close_client_session returns "
+ "error 0x%08X\n",
+ answer->close_client_session.error_code);
+
+ return error;
+}
+
+
+/*
+ * Registers a shared memory to the Secure World
+ */
+int tf_register_shared_memory(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer)
+{
+ int error = 0;
+ struct tf_shmem_desc *shmem_desc = NULL;
+ bool in_user_space = connection->owner != TF_CONNECTION_OWNER_KERNEL;
+ struct tf_command_register_shared_memory *msg =
+ &command->register_shared_memory;
+
+ dprintk(KERN_INFO "tf_register_shared_memory(%p) "
+ "%p[0x%08X][0x%08x]\n",
+ connection,
+ (void *)msg->shared_mem_descriptors[0],
+ msg->shared_mem_size,
+ (u32)msg->memory_flags);
+
+ if (in_user_space) {
+ error = tf_validate_shmem_and_flags(
+ msg->shared_mem_descriptors[0],
+ msg->shared_mem_size,
+ (u32)msg->memory_flags);
+ if (error != 0)
+ goto error;
+ }
+
+ /* Initialize message_size with no descriptors */
+ msg->message_size
+ = (sizeof(struct tf_command_register_shared_memory) -
+ sizeof(struct tf_command_header)) / 4;
+
+ /* Map the shmem block and update the message */
+ if (msg->shared_mem_size == 0) {
+ /* Empty shared mem */
+ msg->shared_mem_start_offset = msg->shared_mem_descriptors[0];
+ } else {
+ u32 descriptorCount;
+ error = tf_map_shmem(
+ connection,
+ msg->shared_mem_descriptors[0],
+ msg->memory_flags,
+ in_user_space,
+ msg->shared_mem_descriptors,
+ &(msg->shared_mem_start_offset),
+ msg->shared_mem_size,
+ &shmem_desc,
+ &descriptorCount);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_register_shared_memory: "
+ "unable to map shared memory block\n");
+ goto error;
+ }
+ msg->message_size += descriptorCount;
+ }
+
+ /*
+	 * Write the device context handle and the address of the shared
+	 * memory descriptor in the message.
+ */
+ msg->device_context = connection->device_context;
+ msg->block_id = (u32)shmem_desc;
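+	/*
+	 * Note that the shmem descriptor pointer set above doubles as the
+	 * opaque block identifier: the secure world echoes it back in the
+	 * answer to RELEASE_SHARED_MEMORY, where it is cast back to a
+	 * pointer (see tf_release_shared_memory).
+	 */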
+
+ /* Send the updated message */
+ error = tf_send_receive(
+ &connection->dev->sm,
+ command,
+ answer,
+ connection,
+ true);
+
+ if ((error != 0) ||
+ (answer->register_shared_memory.error_code
+ != S_SUCCESS)) {
+ dprintk(KERN_ERR "tf_register_shared_memory: "
+ "operation failed. Unmap block\n");
+ goto error;
+ }
+
+	/* Save the block handle returned by the secure world */
+ if (shmem_desc != NULL)
+ shmem_desc->block_identifier =
+ answer->register_shared_memory.block;
+
+ /* successful completion */
+ dprintk(KERN_INFO "tf_register_shared_memory(%p):"
+ " block_id=0x%08x block=0x%08x\n",
+ connection, msg->block_id,
+ answer->register_shared_memory.block);
+ return 0;
+
+ /* error completion */
+error:
+ tf_unmap_shmem(
+ connection,
+ shmem_desc,
+ 0);
+
+ if (error != 0)
+ dprintk(KERN_ERR "tf_register_shared_memory returns %d\n",
+ error);
+ else
+ dprintk(KERN_ERR "tf_register_shared_memory returns "
+ "error_code 0x%08X\n",
+ answer->register_shared_memory.error_code);
+
+ return error;
+}
+
+
+/*
+ * Releases a shared memory from the Secure World
+ */
+int tf_release_shared_memory(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer)
+{
+ int error = 0;
+
+ dprintk(KERN_DEBUG "tf_release_shared_memory(%p)\n", connection);
+
+ command->release_shared_memory.message_size =
+ (sizeof(struct tf_command_release_shared_memory) -
+ sizeof(struct tf_command_header)) / 4;
+ command->release_shared_memory.device_context =
+ connection->device_context;
+
+ error = tf_send_receive(
+ &connection->dev->sm,
+ command,
+ answer,
+ connection,
+ true);
+
+ if ((error != 0) ||
+ (answer->release_shared_memory.error_code != S_SUCCESS))
+ goto error;
+
+ /* Use block_id to get back the pointer to shmem_desc */
+ tf_unmap_shmem(
+ connection,
+ (struct tf_shmem_desc *)
+ answer->release_shared_memory.block_id,
+ 0);
+
+ /* successful completion */
+ dprintk(KERN_INFO "tf_release_shared_memory(%p):"
+ " block_id=0x%08x block=0x%08x\n",
+ connection, answer->release_shared_memory.block_id,
+ command->release_shared_memory.block);
+ return 0;
+
+
+error:
+ if (error != 0)
+ dprintk(KERN_ERR "tf_release_shared_memory returns %d\n",
+ error);
+ else
+ dprintk(KERN_ERR "tf_release_shared_memory returns "
+			"error_code 0x%08X\n",
+ answer->release_shared_memory.error_code);
+
+ return error;
+
+}
+
+
+/*
+ * Invokes a client command to the Secure World
+ */
+int tf_invoke_client_command(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer)
+{
+ int error = 0;
+ struct tf_shmem_desc *shmem_desc[4] = {NULL};
+ int i;
+
+ dprintk(KERN_INFO "tf_invoke_client_command(%p)\n", connection);
+
+	command->invoke_client_command.message_size =
+ (sizeof(struct tf_command_invoke_client_command) -
+ sizeof(struct tf_command_header)) / 4;
+
+#ifdef CONFIG_TF_ZEBRA
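+	/*
+	 * First try the public "shortcut" path: a return value of 0 means the
+	 * update was fully handled in the public world and no message needs
+	 * to be sent to the secure world.
+	 */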
+ error = tf_crypto_try_shortcuted_update(connection,
+ (struct tf_command_invoke_client_command *) command,
+ (struct tf_answer_invoke_client_command *) answer);
+ if (error == 0)
+ return error;
+#endif
+
+ /* Map the tmprefs */
+ for (i = 0; i < 4; i++) {
+ int param_type = TF_GET_PARAM_TYPE(
+ command->invoke_client_command.param_types, i);
+
+ if ((param_type & (TF_PARAM_TYPE_MEMREF_FLAG |
+ TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG))
+ == TF_PARAM_TYPE_MEMREF_FLAG) {
+ /* A temporary memref: map it */
+ error = tf_map_temp_shmem(connection,
+ &command->invoke_client_command.
+ params[i].temp_memref,
+ param_type, &shmem_desc[i]);
+ if (error != 0) {
+ dprintk(KERN_ERR
+ "tf_invoke_client_command: "
+ "unable to map temporary memory "
+					"block (%08X)\n", error);
+ goto error;
+ }
+ }
+ }
+
+ command->invoke_client_command.device_context =
+ connection->device_context;
+
+ error = tf_send_receive(&connection->dev->sm, command,
+ answer, connection, true);
+
+error:
+	/* Unmap the temporary memory references */
+ for (i = 0; i < 4; i++) {
+ if (shmem_desc[i] != NULL) {
+ dprintk(KERN_INFO "tf_invoke_client_command: "
+				"unmap temp_memref %d\n", i);
+
+ tf_unmap_shmem(connection, shmem_desc[i], 0);
+ }
+ }
+
+ if (error != 0)
+ dprintk(KERN_ERR "tf_invoke_client_command returns %d\n",
+ error);
+ else
+ dprintk(KERN_ERR "tf_invoke_client_command returns "
+ "error_code 0x%08X\n",
+ answer->invoke_client_command.error_code);
+
+ return error;
+}
+
+
+/*
+ * Cancels a client command from the Secure World
+ */
+int tf_cancel_client_command(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer)
+{
+ int error = 0;
+
+ dprintk(KERN_DEBUG "tf_cancel_client_command(%p)\n", connection);
+
+ command->cancel_client_operation.device_context =
+ connection->device_context;
+ command->cancel_client_operation.message_size =
+ (sizeof(struct tf_command_cancel_client_operation) -
+ sizeof(struct tf_command_header)) / 4;
+
+ error = tf_send_receive(
+ &connection->dev->sm,
+ command,
+ answer,
+ connection,
+ true);
+
+ if ((error != 0) ||
+ (answer->cancel_client_operation.error_code != S_SUCCESS))
+ goto error;
+
+
+ /* successful completion */
+ return 0;
+
+error:
+ if (error != 0)
+ dprintk(KERN_ERR "tf_cancel_client_command returns %d\n",
+ error);
+ else
+ dprintk(KERN_ERR "tf_cancel_client_command returns "
+			"error_code 0x%08X\n",
+ answer->cancel_client_operation.error_code);
+
+ return error;
+}
+
+
+
+/*
+ * Destroys a device context from the Secure World
+ */
+int tf_destroy_device_context(
+ struct tf_connection *connection)
+{
+ int error;
+ /*
+ * AFY: better use the specialized tf_command_destroy_device_context
+ * structure: this will save stack
+ */
+ union tf_command command;
+ union tf_answer answer;
+
+ dprintk(KERN_INFO "tf_destroy_device_context(%p)\n", connection);
+
+ BUG_ON(connection == NULL);
+
+ command.header.message_type = TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT;
+ command.header.message_size =
+ (sizeof(struct tf_command_destroy_device_context) -
+ sizeof(struct tf_command_header))/sizeof(u32);
+
+ /*
+	 * Fill in the device context handle.
+	 * It is guaranteed that the first shared memory descriptor describes
+	 * the device context.
+ */
+ command.destroy_device_context.device_context =
+ connection->device_context;
+
+ error = tf_send_receive(
+ &connection->dev->sm,
+ &command,
+ &answer,
+ connection,
+ false);
+
+ if ((error != 0) ||
+ (answer.destroy_device_context.error_code != S_SUCCESS))
+ goto error;
+
+ spin_lock(&(connection->state_lock));
+ connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
+ spin_unlock(&(connection->state_lock));
+
+ /* successful completion */
+ dprintk(KERN_INFO "tf_destroy_device_context(%p)\n",
+ connection);
+ return 0;
+
+error:
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_destroy_device_context failed with "
+ "error %d\n", error);
+ } else {
+ dprintk(KERN_ERR "tf_destroy_device_context failed with "
+ "error_code 0x%08X\n",
+ answer.destroy_device_context.error_code);
+ if (answer.destroy_device_context.error_code ==
+ S_ERROR_OUT_OF_MEMORY)
+ error = -ENOMEM;
+ else
+ error = -EFAULT;
+ }
+
+ return error;
+}
+
+
+/*----------------------------------------------------------------------------
+ * Connection initialization and cleanup operations
+ *----------------------------------------------------------------------------*/
+
+/*
+ * Opens a connection to the specified device.
+ *
+ * The placeholder referenced by connection is set to the address of the
+ * new connection; it is set to NULL upon failure.
+ *
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+int tf_open(struct tf_device *dev,
+ struct file *file,
+ struct tf_connection **connection)
+{
+ int error;
+ struct tf_connection *conn = NULL;
+
+ dprintk(KERN_INFO "tf_open(%p, %p)\n", file, connection);
+
+ /*
+ * Allocate and initialize the conn.
+	 * kmalloc only allocates sizeof(*conn) bytes of virtual memory.
+ */
+ conn = (struct tf_connection *) internal_kmalloc(sizeof(*conn),
+ GFP_KERNEL);
+ if (conn == NULL) {
+ printk(KERN_ERR "tf_open(): "
+ "Out of memory for conn!\n");
+ error = -ENOMEM;
+ goto error;
+ }
+
+ memset(conn, 0, sizeof(*conn));
+
+ conn->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
+ conn->dev = dev;
+ spin_lock_init(&(conn->state_lock));
+ atomic_set(&(conn->pending_op_count), 0);
+ INIT_LIST_HEAD(&(conn->list));
+
+ /*
+ * Initialize the shared memory
+ */
+ error = tf_init_shared_memory(conn);
+ if (error != 0)
+ goto error;
+
+#ifdef CONFIG_TF_ZEBRA
+ /*
+ * Initialize CUS specifics
+ */
+ tf_crypto_init_cus(conn);
+#endif
+
+ /*
+ * Attach the conn to the device.
+ */
+ spin_lock(&(dev->connection_list_lock));
+ list_add(&(conn->list), &(dev->connection_list));
+ spin_unlock(&(dev->connection_list_lock));
+
+ /*
+ * Successful completion.
+ */
+
+ *connection = conn;
+
+ dprintk(KERN_INFO "tf_open(): Success (conn=%p)\n", conn);
+ return 0;
+
+ /*
+ * Error handling.
+ */
+
+error:
+ dprintk(KERN_ERR "tf_open(): Failure (error %d)\n", error);
+	/* Free the connection structure if it was allocated */
+ internal_kfree(conn);
+ *connection = NULL;
+ return error;
+}
+
+
+/*
+ * Closes the specified connection.
+ *
+ * Upon return, the connection has been destroyed and cannot be used anymore.
+ *
+ * This function does nothing if connection is set to NULL.
+ */
+void tf_close(struct tf_connection *connection)
+{
+ int error;
+ enum TF_CONN_STATE state;
+
+ dprintk(KERN_DEBUG "tf_close(%p)\n", connection);
+
+ if (connection == NULL)
+ return;
+
+ /*
+ * Assumption: Linux guarantees that no other operation is in progress
+ * and that no other operation will be started when close is called
+ */
+ BUG_ON(atomic_read(&(connection->pending_op_count)) != 0);
+
+ /*
+ * Exchange a Destroy Device Context message if needed.
+ */
+ spin_lock(&(connection->state_lock));
+ state = connection->state;
+ spin_unlock(&(connection->state_lock));
+ if (state == TF_CONN_STATE_VALID_DEVICE_CONTEXT) {
+ /*
+ * A DestroyDeviceContext operation was not performed. Do it
+ * now.
+ */
+ error = tf_destroy_device_context(connection);
+ if (error != 0)
+ /* avoid cleanup if destroy device context fails */
+ goto error;
+ }
+
+ /*
+ * Clean up the shared memory
+ */
+ tf_cleanup_shared_memories(connection);
+
+ spin_lock(&(connection->dev->connection_list_lock));
+ list_del(&(connection->list));
+ spin_unlock(&(connection->dev->connection_list_lock));
+
+ internal_kfree(connection);
+
+ return;
+
+error:
+ dprintk(KERN_DEBUG "tf_close(%p) failed with error code %d\n",
+ connection, error);
+}
diff --git a/security/smc/tf_conn.h b/security/smc/tf_conn.h
new file mode 100644
index 0000000..d2c8261
--- /dev/null
+++ b/security/smc/tf_conn.h
@@ -0,0 +1,87 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TF_CONN_H__
+#define __TF_CONN_H__
+
+#include "tf_defs.h"
+
+/*
+ * Returns a pointer to the connection referenced by the
+ * specified file.
+ */
+static inline struct tf_connection *tf_conn_from_file(
+ struct file *file)
+{
+ return file->private_data;
+}
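+
+/*
+ * Typical use from a file operation handler (illustrative):
+ *
+ *	struct tf_connection *connection = tf_conn_from_file(file);
+ */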
+
+/*----------------------------------------------------------------------------
+ * Connection operations to the Secure World
+ *----------------------------------------------------------------------------*/
+
+int tf_create_device_context(
+ struct tf_connection *connection);
+
+int tf_destroy_device_context(
+ struct tf_connection *connection);
+
+int tf_open_client_session(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer);
+
+int tf_close_client_session(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer);
+
+int tf_register_shared_memory(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer);
+
+int tf_release_shared_memory(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer);
+
+int tf_invoke_client_command(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer);
+
+int tf_cancel_client_command(
+ struct tf_connection *connection,
+ union tf_command *command,
+ union tf_answer *answer);
+
+/*----------------------------------------------------------------------------
+ * Connection initialization and cleanup operations
+ *----------------------------------------------------------------------------*/
+
+int tf_open(struct tf_device *dev,
+ struct file *file,
+ struct tf_connection **connection);
+
+void tf_close(
+ struct tf_connection *connection);
+
+
+#endif /* !defined(__TF_CONN_H__) */
diff --git a/security/smc/tf_crypto.c b/security/smc/tf_crypto.c
new file mode 100644
index 0000000..7edca0f
--- /dev/null
+++ b/security/smc/tf_crypto.c
@@ -0,0 +1,1278 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include "tf_defs.h"
+#include "tf_util.h"
+#include "tf_zebra.h"
+#include "tf_crypto.h"
+#include "tf_dma.h"
+
+#define IO_ADDRESS OMAP2_L4_IO_ADDRESS
+
+#define S_SUCCESS 0x00000000
+#define S_ERROR_GENERIC 0xFFFF0000
+#define S_ERROR_ACCESS_DENIED 0xFFFF0001
+#define S_ERROR_BAD_FORMAT 0xFFFF0005
+#define S_ERROR_BAD_PARAMETERS 0xFFFF0006
+#define S_ERROR_OUT_OF_MEMORY 0xFFFF000C
+#define S_ERROR_SHORT_BUFFER 0xFFFF0010
+#define S_ERROR_UNREACHABLE 0xFFFF3013
+#define S_ERROR_SERVICE 0xFFFF1000
+
+#define CKR_OK 0x00000000
+
+#define PUBLIC_CRYPTO_TIMEOUT_CONST 0x000FFFFF
+
+#define RPC_AES1_CODE PUBLIC_CRYPTO_HWA_AES1
+#define RPC_DES_CODE PUBLIC_CRYPTO_HWA_DES
+#define RPC_SHA_CODE PUBLIC_CRYPTO_HWA_SHA
+
+#define RPC_CRYPTO_COMMAND_MASK 0x000003c0
+
+#define RPC_INSTALL_SHORTCUT_LOCK_ACCELERATOR 0x200
+#define RPC_INSTALL_SHORTCUT_LOCK_ACCELERATOR_UNLOCK 0x000
+#define RPC_INSTALL_SHORTCUT_LOCK_ACCELERATOR_LOCK 0x001
+
+#define RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT 0x240
+#define RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_LOCK_AES1 RPC_AES1_CODE
+#define RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_LOCK_DES RPC_DES_CODE
+#define RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_LOCK_SHA RPC_SHA_CODE
+#define RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_SUSPEND 0x010
+#define RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_UNINSTALL 0x020
+
+#define RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS 0x280
+#define RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_UNLOCK_AES1 RPC_AES1_CODE
+#define RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_UNLOCK_DES RPC_DES_CODE
+#define RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_UNLOCK_SHA RPC_SHA_CODE
+#define RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_RESUME 0x010
+
+#define RPC_CLEAR_GLOBAL_KEY_CONTEXT 0x2c0
+#define RPC_CLEAR_GLOBAL_KEY_CONTEXT_CLEARED_AES 0x001
+#define RPC_CLEAR_GLOBAL_KEY_CONTEXT_CLEARED_DES 0x002
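+
+/*
+ * In an RPC command word, bits [9:6] (RPC_CRYPTO_COMMAND_MASK) select one of
+ * the four commands above, while the low-order bits carry command-specific
+ * flags (target HWAs, lock/suspend/uninstall/resume options).
+ */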
+
+#define ENABLE_CLOCK true
+#define DISABLE_CLOCK false
+
+/*---------------------------------------------------------------------------*/
+/*RPC IN/OUT structures for CUS implementation */
+/*---------------------------------------------------------------------------*/
+
+struct rpc_install_shortcut_lock_accelerator_out {
+ u32 shortcut_id;
+ u32 error;
+};
+
+struct rpc_install_shortcut_lock_accelerator_in {
+ u32 device_context_id;
+ u32 client_session;
+ u32 command_id;
+ u32 key_context;
+ /**
+ *The identifier of the HWA accelerator that this shortcut uses!
+ *Possible values are:
+ *- 1 (RPC_AES1_CODE)
+ *- 4 (RPC_DES_CODE)
+ *- 8 (RPC_SHA_CODE)
+ **/
+ u32 hwa_id;
+ /**
+ *This field defines the algorithm, direction, mode, key size.
+ *It contains some of the bits of the corresponding "CTRL" register
+ *of the accelerator.
+ *
+ *More precisely:
+ *For AES1 accelerator, hwa_ctrl contains the following bits:
+ *- CTR (bit 6):
+ * when 1, selects CTR mode.
+ * when 0, selects CBC or ECB mode (according to CBC bit)
+ *- CBC (bit 5)
+ * when 1, selects CBC mode (but only if CTR=0)
+	 * when 0, selects ECB mode (but only if CTR=0)
+ *- DIRECTION (bit 2)
+ * 0: decryption
+ * 1: encryption
+ *
+ *For the DES2 accelerator, hwa_ctrl contains the following bits:
+ *- CBC (bit 4): 1 for CBC, 0 for ECB
+ *- DIRECTION (bit 2): 0 for decryption, 1 for encryption
+ *
+ *For the SHA accelerator, hwa_ctrl contains the following bits:
+ *- ALGO (bit 2:1):
+ * 0x0: MD5
+ * 0x1: SHA1
+ * 0x2: SHA-224
+ * 0x3: SHA-256
+ **/
+ u32 hwa_ctrl;
+ union tf_crypto_operation_state operation_state;
+};
+
+struct rpc_lock_hwa_suspend_shortcut_out {
+ union tf_crypto_operation_state operation_state;
+};
+
+struct rpc_lock_hwa_suspend_shortcut_in {
+ u32 shortcut_id;
+};
+
+struct rpc_resume_shortcut_unlock_hwa_in {
+ u32 shortcut_id;
+ u32 aes1_key_context;
+ u32 reserved;
+ u32 des_key_context;
+ union tf_crypto_operation_state operation_state;
+};
+
+/*------------------------------------------------------------------------- */
+/*
+ * tf_get_device_context(struct cus_context *cus)
+ * Search all the device contexts (connection_list) for the CUS context
+ * specified by cus.
+ *
+ * If it is found, return the device context that contains the CUS context.
+ * If it is not found, return NULL.
+ */
+static struct tf_connection *tf_get_device_context(
+ struct cus_context *cus)
+{
+ struct tf_connection *connection = NULL;
+ struct cus_context *cusFromList = NULL;
+ struct tf_device *dev = tf_get_device();
+
+ spin_lock(&(dev->connection_list_lock));
+ list_for_each_entry(connection, &(dev->connection_list),
+ list) {
+ spin_lock(&(connection->shortcut_list_lock));
+ list_for_each_entry(cusFromList,
+ &(connection->shortcut_list), list) {
+ if ((u32)cusFromList == (u32)cus) {
+ spin_unlock(&(connection->
+ shortcut_list_lock));
+ spin_unlock(&(dev->
+ connection_list_lock));
+ return connection;
+ }
+ }
+ spin_unlock(&(connection->
+ shortcut_list_lock));
+ }
+ spin_unlock(&(dev->connection_list_lock));
+
+ /*cus does not exist */
+ return NULL;
+}
+
+/*------------------------------------------------------------------------- */
+/*
+ * Get the shared memory from the memory block handle coming from secure.
+ * Return NULL if it does not exist.
+ */
+static struct tf_shmem_desc *tf_get_shmem_from_block_handle(
+ struct tf_connection *connection, u32 block)
+{
+ struct tf_shmem_desc *shmem_desc = NULL;
+
+ mutex_lock(&(connection->shmem_mutex));
+
+ list_for_each_entry(shmem_desc,
+ &(connection->used_shmem_list), list) {
+ if ((u32) shmem_desc->block_identifier ==
+ (u32) block) {
+ mutex_unlock(&(connection->shmem_mutex));
+ return shmem_desc;
+ }
+ }
+
+ /* block does not exist */
+ mutex_unlock(&(connection->shmem_mutex));
+
+ return NULL;
+}
+
+/*------------------------------------------------------------------------- */
+/*
+ * Lock or unlock one HWA according to the algorithm specified by hwa_id
+ */
+void tf_crypto_lock_hwa(u32 hwa_id, bool do_lock)
+{
+ struct semaphore *s = NULL;
+ struct tf_device *dev = tf_get_device();
+
+ dprintk(KERN_INFO "[pid=%d] %s: hwa_id=0x%04X do_lock=%d\n",
+ current->pid, __func__, hwa_id, do_lock);
+
+ switch (hwa_id) {
+ case RPC_AES1_CODE:
+ s = &dev->aes1_sema;
+ break;
+ case RPC_DES_CODE:
+ s = &dev->des_sema;
+ break;
+ default:
+ case RPC_SHA_CODE:
+ s = &dev->sha_sema;
+ break;
+ }
+
+ if (do_lock == LOCK_HWA) {
+ dprintk(KERN_INFO "tf_crypto_lock_hwa: "
+ "Wait for HWAID=0x%04X\n", hwa_id);
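+		/*
+		 * down_trylock() never sleeps, so this busy-waits on the
+		 * semaphore, relaxing the CPU between attempts.
+		 */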
+ while (down_trylock(s))
+ cpu_relax();
+ dprintk(KERN_INFO "tf_crypto_lock_hwa: "
+ "Locked on HWAID=0x%04X\n", hwa_id);
+ } else {
+ up(s);
+ dprintk(KERN_INFO "tf_crypto_lock_hwa: "
+ "Released for HWAID=0x%04X\n", hwa_id);
+ }
+}
+
+/*------------------------------------------------------------------------- */
+/*
+ * Lock or unlock the HWAs specified in the H/A/D fields of the RPC
+ * command rpc_command
+ */
+static void tf_crypto_lock_hwas(u32 rpc_command, bool do_lock)
+{
+ dprintk(KERN_INFO
+ "tf_crypto_lock_hwas: rpc_command=0x%08x do_lock=%d\n",
+ rpc_command, do_lock);
+
+ /* perform the locks */
+ if (rpc_command & RPC_AES1_CODE)
+ tf_crypto_lock_hwa(RPC_AES1_CODE, do_lock);
+
+ if (rpc_command & RPC_DES_CODE)
+ tf_crypto_lock_hwa(RPC_DES_CODE, do_lock);
+
+ if (rpc_command & RPC_SHA_CODE)
+ tf_crypto_lock_hwa(RPC_SHA_CODE, do_lock);
+}
+
+/*------------------------------------------------------------------------- */
+/**
+ *Initialize the public crypto DMA channels, global HWA semaphores and handles
+ */
+u32 tf_crypto_init(void)
+{
+ struct tf_device *dev = tf_get_device();
+ u32 error = PUBLIC_CRYPTO_OPERATION_SUCCESS;
+
+ /* Initialize HWAs */
+ tf_aes_init();
+ tf_des_init();
+ tf_digest_init();
+
+ /*initialize the HWA semaphores */
+ sema_init(&dev->aes1_sema, 1);
+ sema_init(&dev->des_sema, 1);
+ sema_init(&dev->sha_sema, 1);
+
+ /*initialize the current key handle loaded in the AESn/DES HWA */
+ dev->aes1_key_context = 0;
+ dev->des_key_context = 0;
+ dev->sham1_is_public = false;
+
+ /*initialize the DMA semaphores */
+ mutex_init(&dev->sm.dma_mutex);
+
+ /*allocate DMA buffer */
+ dev->dma_buffer_length = PAGE_SIZE * 16;
+ dev->dma_buffer = dma_alloc_coherent(NULL,
+ dev->dma_buffer_length,
+ &(dev->dma_buffer_phys),
+ GFP_KERNEL);
+ if (dev->dma_buffer == NULL) {
+ printk(KERN_ERR
+ "tf_crypto_init: Out of memory for DMA buffer\n");
+ error = S_ERROR_OUT_OF_MEMORY;
+ }
+
+ return error;
+}
+
+/*------------------------------------------------------------------------- */
+/*
+ *Initialize the device context CUS fields (shortcut semaphore and public CUS
+ *list)
+ */
+void tf_crypto_init_cus(struct tf_connection *connection)
+{
+ /*initialize the CUS list in the given device context */
+ spin_lock_init(&(connection->shortcut_list_lock));
+ INIT_LIST_HEAD(&(connection->shortcut_list));
+}
+
+/*------------------------------------------------------------------------- */
+/**
+ *Terminate the public crypto (including DMA)
+ */
+void tf_crypto_terminate(void)
+{
+ struct tf_device *dev = tf_get_device();
+
+ if (dev->dma_buffer != NULL) {
+ dma_free_coherent(NULL, dev->dma_buffer_length,
+ dev->dma_buffer,
+ dev->dma_buffer_phys);
+ dev->dma_buffer = NULL;
+ }
+
+ tf_digest_exit();
+ tf_des_exit();
+ tf_aes_exit();
+}
+
+/*------------------------------------------------------------------------- */
+/*
+ *Perform a crypto update operation.
+ *THIS FUNCTION IS CALLED FROM THE IOCTL
+ */
+static bool tf_crypto_update(
+ struct cus_context *cus,
+ struct cus_params *params)
+{
+ bool status = true;
+ dprintk(KERN_INFO
+ "tf_crypto_update(%x): "\
+ "HWAID=0x%x, In=%p, Out=%p, Len=%u\n",
+ (uint32_t) cus, cus->hwa_id,
+ params->input_data,
+ params->output_data, params->input_data_length);
+
+ /* Enable the clock and Process Data */
+ switch (cus->hwa_id) {
+ case RPC_AES1_CODE:
+ tf_crypto_enable_clock(PUBLIC_CRYPTO_AES1_CLOCK_REG);
+ cus->operation_state.aes.key_is_public = 0;
+ cus->operation_state.aes.CTRL = cus->hwa_ctrl;
+ status = tf_aes_update(
+ &cus->operation_state.aes,
+ params->input_data,
+ params->output_data,
+ params->input_data_length / AES_BLOCK_SIZE);
+ tf_crypto_disable_clock(PUBLIC_CRYPTO_AES1_CLOCK_REG);
+ break;
+
+ case RPC_DES_CODE:
+ tf_crypto_enable_clock(PUBLIC_CRYPTO_DES3DES_CLOCK_REG);
+ status = tf_des_update(
+ cus->hwa_ctrl,
+ &cus->operation_state.des,
+ params->input_data,
+ params->output_data,
+ params->input_data_length / DES_BLOCK_SIZE);
+ tf_crypto_disable_clock(PUBLIC_CRYPTO_DES3DES_CLOCK_REG);
+ break;
+
+ case RPC_SHA_CODE:
+ tf_crypto_enable_clock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG);
+ cus->operation_state.sha.CTRL = cus->hwa_ctrl;
+ status = tf_digest_update(
+ &cus->operation_state.sha,
+ params->input_data,
+ params->input_data_length);
+ tf_crypto_disable_clock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG);
+ break;
+
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+ dprintk(KERN_INFO "tf_crypto_update: Done\n");
+ return status;
+}
+
+/*------------------------------------------------------------------------- */
+
+/*
+ *Check if the command must be intercepted by a CUS or not.
+ *THIS FUNCTION IS CALLED FROM THE USER THREAD (ioctl).
+ *
+ *inputs: struct tf_connection *connection : current device context
+ * tf_command_invoke_client_command *command : the command
+ *	bool incrementuse_count : specifies whether use_count must be incremented
+ *output:
+ * struct cus_context **cus_ctx : the public CUS
+ * if it is shortcuted
+ *return: true or false
+ *
+ */
+static bool tf_crypto_is_shortcuted_command(
+ struct tf_connection *connection,
+ struct tf_command_invoke_client_command *command,
+ struct cus_context **cus_ctx,
+ bool incrementuse_count)
+{
+ struct tf_device *dev = tf_get_device();
+ struct cus_context *cus = NULL;
+ *cus_ctx = NULL;
+
+ dprintk(KERN_INFO "tf_crypto_is_shortcuted_command: "\
+ "connection=0x%08x, command=0x%08x, "\
+ "CltSession=0x%08x, CmdID=0x%08x\n",
+ (uint32_t) connection, (uint32_t) command,
+ (uint32_t) command->client_session,
+ command->client_command_identifier);
+
+	/*Take shortcut_list_lock for the device context in which the message
+	 *is sent, to make sure that nobody changes the data while it is
+	 *being processed */
+ spin_lock(&(connection->shortcut_list_lock));
+
+ /*lookup in the list of shortcuts attached to the device context for a
+ *shortcut context that contains the same client_session as the command
+ *and such that command_id is equal to client_command_identifier of the
+ *INVOKE_CLIENT_COMMAND message. If no such shortcut exists, take the
+ *standard path */
+ list_for_each_entry(
+ cus, &(connection->shortcut_list), list) {
+ dprintk(KERN_INFO
+ "tf_crypto_is_shortcuted_command: "\
+ "command_id = 0x%08x client_session = 0x%08x\n",
+ cus->command_id, cus->client_session);
+
+ if ((cus->client_session == command->client_session)
+ &&
+ (cus->command_id == command->
+ client_command_identifier)) {
+ dprintk(KERN_INFO
+ "tf_crypto_is_shortcuted_command: "\
+ "shortcut is identified\n");
+			/*found a CUS: check whether it is suspended or not */
+ if (cus->suspended) {
+ /*
+				 * The suspended flag of the shortcut context
+				 * is set to true: the secure world has
+ * suspended the shortcut to perform an update
+ * on its own. In this case, take the standard
+ * path. This should happen very rarely because
+ * the client and the service should generally
+ * communicate to avoid such a collision
+ */
+ dprintk(KERN_INFO "shortcut exists but "\
+ "suspended\n");
+ goto command_not_shortcutable;
+
+ } else {
+ dprintk(KERN_INFO "shortcut exists\n");
+ /*For AES and DES/3DES operations,
+ *provisionally determine if the accelerator
+ *is loaded with the appropriate key before
+ *deciding to enter the accelerator critical
+ *section. In most cases, if some other thread
+ *or the secure world is currently using the
+ *accelerator, the key won't change.
+ *So, if the key doesn't match now, it is
+ *likely not to match later on, so we'd better
+ *not try to enter the critical section in this
+ *case: */
+
+ if (cus->hwa_id == RPC_AES1_CODE &&
+ cus->
+ key_context != dev->
+ aes1_key_context) {
+ /*For AES operations, atomically read
+				*aes1_key_context and check if
+ *it is equal to key_context. If not,
+ *take the standard path <=> do not
+ *shortcut */
+ dprintk(KERN_INFO
+ "shortcut exists but AES key "\
+ "not correct\nkey_context="\
+ "0x%08x vs 0x%08x\n",
+ cus->key_context,
+ dev->
+ aes1_key_context);
+ goto command_not_shortcutable;
+
+ } else if (cus->hwa_id == RPC_DES_CODE
+ && cus->key_context !=
+ dev->
+ des_key_context) {
+ /*
+ * For DES/3DES atomically read
+ * des_key_context and check if
+ * it is equal to key_context. If not,
+ * take the standard path <=> do not
+ * shortcut
+ */
+ dprintk(KERN_INFO
+ "shortcut exists but DES key "
+ "not correct "
+ "des_key_context = 0x%08x"
+					" key_context=0x%08x\n",
+ (u32)dev->
+ des_key_context,
+ (u32)cus->key_context);
+ goto command_not_shortcutable;
+ } else if (cus->hwa_id == RPC_SHA_CODE
+ && !dev->sham1_is_public) {
+ /*
+ * For digest operations, atomically
+ * read sham1_is_public and check if it
+ * is true. If not, no shortcut.
+ */
+ dprintk(KERN_INFO
+ "shortcut exists but SHAM1 "
+					"is not accessible in public\n");
+ goto command_not_shortcutable;
+ }
+ }
+
+			dprintk(KERN_INFO "shortcut exists and is enabled\n");
+
+			/*Shortcut has been found and the context matches the
+			 *calling thread: the command can be shortcuted */
+
+ /*
+ *set the pointer on the corresponding session
+ *(eq CUS context)
+ */
+ *cus_ctx = cus;
+
+ /*
+ *increment use_count if required
+ */
+ if (incrementuse_count)
+ cus->use_count++;
+
+ /*
+ *release shortcut_list_lock
+ */
+ spin_unlock(&(connection->
+ shortcut_list_lock));
+ return true;
+ }
+ }
+
+ command_not_shortcutable:
+ /*
+ *release shortcut_list_lock
+ */
+ spin_unlock(&(connection->shortcut_list_lock));
+ *cus_ctx = NULL;
+ return false;
+}
+
+/*------------------------------------------------------------------------- */
+/*
+ * Pre-process the client command (crypto update operation), i.e., parse the
+ * command message (decode buffers, etc.) THIS FUNCTION IS CALLED FROM THE USER
+ * THREAD (ioctl).
+ *
+ * For incorrect messages, an error is returned and the message will be sent to
+ * secure
+ */
+static bool tf_crypto_parse_command_message(struct tf_connection *connection,
+ struct cus_context *cus,
+ struct tf_command_invoke_client_command *command,
+ struct cus_params *params)
+{
+ u32 param_type;
+ u32 input_data_length;
+ u32 output_data_length;
+ u8 *input_data;
+ u8 *output_data;
+ struct tf_shmem_desc *input_shmem = NULL;
+ struct tf_shmem_desc *output_shmem = NULL;
+
+ dprintk(KERN_INFO
+ "tf_crypto_parse_command_message(%p) : Session=0x%x\n",
+ cus, cus->client_session);
+
+ if (command->params[0].temp_memref.size == 0)
+ return false;
+
+ param_type = TF_GET_PARAM_TYPE(command->param_types, 0);
+ switch (param_type) {
+ case TF_PARAM_TYPE_MEMREF_TEMP_INPUT:
+ if (command->params[0].temp_memref.descriptor == 0)
+ return false;
+
+ input_data = (u8 *) command->params[0].temp_memref.
+ descriptor;
+ input_data_length = command->params[0].temp_memref.size;
+
+ break;
+
+ case TF_PARAM_TYPE_MEMREF_INPUT:
+ input_shmem = tf_get_shmem_from_block_handle(connection,
+ command->params[0].memref.block);
+
+ if (input_shmem == NULL)
+ return false;
+ atomic_inc(&input_shmem->ref_count);
+
+ input_data = input_shmem->pBuffer +
+ command->params[0].memref.offset;
+ input_data_length = command->params[0].memref.size;
+
+ break;
+
+ default:
+ return false;
+ }
+
+ if (cus->hwa_id != RPC_SHA_CODE) {
+ if (command->params[1].temp_memref.size == 0)
+ goto err0;
+
+ /* We need an output buffer as well */
+ param_type = TF_GET_PARAM_TYPE(command->param_types, 1);
+ switch (param_type) {
+ case TF_PARAM_TYPE_MEMREF_TEMP_OUTPUT:
+ output_data =
+ (u8 *) command->params[1].temp_memref.
+ descriptor;
+ output_data_length =
+ command->params[1].temp_memref.size;
+
+ break;
+
+ case TF_PARAM_TYPE_MEMREF_OUTPUT:
+			if (command->params[1].temp_memref.descriptor == 0)
+				goto err0;
+
+ output_shmem = tf_get_shmem_from_block_handle(
+ connection, command->params[1].memref.block);
+ if (output_shmem == NULL)
+ goto err0;
+ atomic_inc(&output_shmem->ref_count);
+
+ output_data = output_shmem->pBuffer +
+ command->params[1].memref.offset;
+ output_data_length = command->params[1].memref.size;
+
+ break;
+
+ default:
+ dprintk(KERN_ERR "tf_crypto_parse_command_message: "
+ "Encrypt/decrypt operations require an output "
+ "buffer\n");
+
+ goto err0;
+ }
+
+ if (output_data_length < input_data_length) {
+ dprintk(KERN_ERR "tf_crypto_parse_command_message: "
+ "Short buffer: output_data_length = %d < "
+ "input_data_length = %d\n",
+ output_data_length, input_data_length);
+ goto err1;
+ }
+ } else {
+ output_data_length = 0;
+ output_data = NULL;
+ }
+
+ /*
+ * Check if input length is compatible with the algorithm of the
+ * shortcut
+ */
+ switch (cus->hwa_id) {
+ case RPC_AES1_CODE:
+ /* Must be multiple of the AES block size */
+ if ((input_data_length % AES_BLOCK_SIZE) != 0) {
+ dprintk(KERN_ERR
+ "tf_crypto_parse_command_message(%p): "\
+ "Input Data Length invalid [%d] for AES\n",
+ cus, input_data_length);
+ goto err1;
+ }
+ break;
+ case RPC_DES_CODE:
+ /* Must be multiple of the DES block size */
+ if ((input_data_length % DES_BLOCK_SIZE) != 0) {
+ dprintk(KERN_ERR
+ "tf_crypto_parse_command_message(%p): "\
+ "Input Data Length invalid [%d] for DES\n",
+ cus, input_data_length);
+ goto err1;
+ }
+ break;
+ default:
+ /* SHA operation: no constraint on data length */
+ break;
+ }
+
+ params->input_data = input_data;
+ params->input_data_length = input_data_length;
+ params->input_shmem = input_shmem;
+ params->output_data = output_data;
+ params->output_data_length = output_data_length;
+ params->output_shmem = output_shmem;
+
+ return true;
+
+err1:
+ if (output_shmem)
+ atomic_dec(&output_shmem->ref_count);
+err0:
+ if (input_shmem)
+ atomic_dec(&input_shmem->ref_count);
+
+ return false;
+}
+
+/*------------------------------------------------------------------------- */
+
+/*
+ *Post-process the client command (crypto update operation),
+ *i.e. copy the result into the user output buffer and release the resources.
+ *THIS FUNCTION IS CALLED FROM THE USER THREAD (ioctl).
+ */
+static void tf_crypto_write_answer(
+ struct cus_context *cus,
+ struct cus_params *params,
+ struct tf_answer_invoke_client_command *answer)
+{
+ u32 error = S_SUCCESS;
+
+ dprintk(KERN_INFO
+ "tf_crypto_write_answer(%p) : Session=0x%x\n",
+ cus, cus->client_session);
+
+ /* Generate the answer */
+ answer->message_size =
+ (sizeof(struct tf_answer_invoke_client_command) -
+ sizeof(struct tf_answer_header)) / 4;
+ answer->message_type = TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND;
+ answer->error_origin = TF_ORIGIN_TRUSTED_APP;
+ answer->operation_id = 0;
+ answer->error_code = error;
+ answer->answers[1].size.size = params->output_data_length;
+}
+
+/*------------------------------------------------------------------------- */
+
+int tf_crypto_try_shortcuted_update(struct tf_connection *connection,
+ struct tf_command_invoke_client_command *command,
+ struct tf_answer_invoke_client_command *answer)
+{
+ struct cus_context *cus = NULL;
+
+ if (tf_crypto_is_shortcuted_command(connection,
+ (struct tf_command_invoke_client_command *) command,
+ &cus, false)) {
+ u32 hwa_id = cus->hwa_id;
+
+ /* Lock HWA */
+ tf_crypto_lock_hwa(hwa_id, LOCK_HWA);
+
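+		/*
+		 * Re-check under the HWA lock: the shortcut may have been
+		 * suspended or its key context evicted between the first
+		 * lookup and acquiring the lock. This second lookup also
+		 * increments use_count.
+		 */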
+ if (tf_crypto_is_shortcuted_command(connection,
+ command,
+ &cus, true)) {
+ struct cus_params cus_params;
+
+ memset(&cus_params, 0, sizeof(cus_params));
+
+ if (!tf_crypto_parse_command_message(
+ connection,
+ cus,
+ command,
+ &cus_params)) {
+ /* Decrement CUS context use count */
+ cus->use_count--;
+
+ /* Release HWA lock */
+ tf_crypto_lock_hwa(cus->hwa_id,
+ UNLOCK_HWA);
+
+ return -1;
+ }
+
+ /* Perform the update in public <=> THE shortcut */
+ if (!tf_crypto_update(cus, &cus_params)) {
+ /* Decrement CUS context use count */
+ cus->use_count--;
+
+ /* Release HWA lock */
+ tf_crypto_lock_hwa(cus->hwa_id,
+ UNLOCK_HWA);
+
+ return -1;
+ }
+
+ /* Write answer message */
+ tf_crypto_write_answer(cus,
+ &cus_params, answer);
+
+ /* Decrement registered shmems use count if needed */
+ if (cus_params.input_shmem)
+ atomic_dec(&cus_params.input_shmem->ref_count);
+ if (cus_params.output_shmem)
+ atomic_dec(&cus_params.output_shmem->ref_count);
+
+ /* Decrement CUS context use count */
+ cus->use_count--;
+
+ tf_crypto_lock_hwa(cus->hwa_id,
+ UNLOCK_HWA);
+ } else {
+ tf_crypto_lock_hwa(hwa_id, UNLOCK_HWA);
+ return -1;
+ }
+ } else {
+ return -1;
+ }
+
+ return 0;
+}
+
+/*------------------------------------------------------------------------- */
+
+void tf_crypto_wait_for_ready_bit_infinitely(u32 *reg, u32 bit)
+{
+ while (!(INREG32(reg) & bit))
+ ;
+}
+
+/*------------------------------------------------------------------------- */
+
+u32 tf_crypto_wait_for_ready_bit(u32 *reg, u32 bit)
+{
+ u32 timeoutCounter = PUBLIC_CRYPTO_TIMEOUT_CONST;
+
+ while ((!(INREG32(reg) & bit)) && ((--timeoutCounter) != 0))
+ ;
+
+ if (timeoutCounter == 0)
+ return PUBLIC_CRYPTO_ERR_TIMEOUT;
+
+ return PUBLIC_CRYPTO_OPERATION_SUCCESS;
+}
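+
+/*
+ * Illustrative caller sketch (register and bit names are hypothetical):
+ *
+ *	if (tf_crypto_wait_for_ready_bit(
+ *			(u32 *)&aes_reg->AES_CTRL, AES_CTRL_OUTPUT_READY_BIT)
+ *				!= PUBLIC_CRYPTO_OPERATION_SUCCESS)
+ *		return PUBLIC_CRYPTO_ERR_TIMEOUT;
+ */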
+
+/*------------------------------------------------------------------------- */
+
+static DEFINE_SPINLOCK(clk_lock);
+
+void tf_crypto_disable_clock(uint32_t clock_paddr)
+{
+ u32 *clock_reg;
+ u32 val;
+ unsigned long flags;
+
+ dprintk(KERN_INFO "tf_crypto_disable_clock: " \
+ "clock_paddr=0x%08X\n",
+ clock_paddr);
+
+	/* Ensure no concurrent access while changing clock registers */
+ spin_lock_irqsave(&clk_lock, flags);
+
+ clock_reg = (u32 *)IO_ADDRESS(clock_paddr);
+
+ val = __raw_readl(clock_reg);
+ val &= ~(0x3);
+ __raw_writel(val, clock_reg);
+
+ /* Wait for clock to be fully disabled */
+ while ((__raw_readl(clock_reg) & 0x30000) == 0)
+ ;
+
+ spin_unlock_irqrestore(&clk_lock, flags);
+
+ tf_l4sec_clkdm_allow_idle(true);
+}
+
+/*------------------------------------------------------------------------- */
+
+void tf_crypto_enable_clock(uint32_t clock_paddr)
+{
+ u32 *clock_reg;
+ u32 val;
+ unsigned long flags;
+
+ dprintk(KERN_INFO "tf_crypto_enable_clock: " \
+ "clock_paddr=0x%08X\n",
+ clock_paddr);
+
+ tf_l4sec_clkdm_wakeup(true);
+
+	/* Ensure no concurrent access while changing clock registers */
+ spin_lock_irqsave(&clk_lock, flags);
+
+ clock_reg = (u32 *)IO_ADDRESS(clock_paddr);
+
+ val = __raw_readl(clock_reg);
+ val |= 0x2;
+ __raw_writel(val, clock_reg);
+
+ /* Wait for clock to be fully enabled */
+ while ((__raw_readl(clock_reg) & 0x30000) != 0)
+ ;
+
+ spin_unlock_irqrestore(&clk_lock, flags);
+}
+
+/*------------------------------------------------------------------------- */
+/* CUS RPCs */
+/*------------------------------------------------------------------------- */
+/*
+ * This RPC is used by the secure world to install a new shortcut. Optionally,
+ * for AES or DES/3DES operations, it can also lock the accelerator so that the
+ * secure world can install a new key in it.
+ */
+static int tf_crypto_install_shortcut_lock_hwa(
+ u32 rpc_command, void *rpc_shared_buffer)
+{
+ struct cus_context *cus = NULL;
+ struct tf_connection *connection = NULL;
+
+	/* Reference the input/output data */
+ struct rpc_install_shortcut_lock_accelerator_out *install_cus_out =
+ rpc_shared_buffer;
+ struct rpc_install_shortcut_lock_accelerator_in *install_cus_in =
+ rpc_shared_buffer;
+
+ dprintk(KERN_INFO "tf_crypto_install_shortcut_lock_hwa: "
+ "rpc_command=0x%08x; hwa_id=0x%08x\n",
+ rpc_command, install_cus_in->hwa_id);
+
+ connection = (struct tf_connection *)
+ install_cus_in->device_context_id;
+
+ if (connection == NULL) {
+ dprintk(KERN_INFO
+ "tf_crypto_install_shortcut_lock_hwa: "
+ "DeviceContext 0x%08x does not exist, "
+ "cannot create Shortcut\n",
+ install_cus_in->device_context_id);
+ install_cus_out->error = -1;
+ return 0;
+ }
+
+ /*
+ * Allocate a shortcut context. If the allocation fails,
+ * return S_ERROR_OUT_OF_MEMORY error code
+ */
+ cus = (struct cus_context *)
+ internal_kmalloc(sizeof(*cus), GFP_KERNEL);
+ if (cus == NULL) {
+ dprintk(KERN_ERR
+ "tf_crypto_install_shortcut_lock_hwa: "\
+ "Out of memory for public session\n");
+ install_cus_out->error = S_ERROR_OUT_OF_MEMORY;
+ return 0;
+ }
+
+ memset(cus, 0, sizeof(*cus));
+
+ /*setup the shortcut */
+ cus->magic_number = CUS_CONTEXT_MAGIC;
+ cus->client_session = install_cus_in->client_session;
+ cus->command_id = install_cus_in->command_id;
+ cus->hwa_id = install_cus_in->hwa_id;
+ cus->hwa_ctrl = install_cus_in->hwa_ctrl;
+ cus->key_context = install_cus_in->key_context;
+ cus->use_count = 0;
+ cus->suspended = false;
+
+ memcpy(&cus->operation_state,
+ &install_cus_in->operation_state,
+ sizeof(union tf_crypto_operation_state));
+
+ /*lock the shortcut_list_lock for this device context */
+ spin_lock(&connection->shortcut_list_lock);
+
+ /*Insert the shortcut in the list of shortcuts in the device context */
+ list_add(&(cus->list), &(connection->shortcut_list));
+
+ /*release shortcut_list_lock */
+ spin_unlock(&connection->shortcut_list_lock);
+
+ /*fill the output structure */
+ install_cus_out->shortcut_id = (u32) cus;
+ install_cus_out->error = S_SUCCESS;
+
+	/*If the L bit is set:
+	 * Enter the accelerator critical section. If an update is currently in
+	 * progress on the accelerator (using the global key context), this
+	 * waits until the update has completed. This is done when the secure
+	 * world wants to install a key in the HWA; once done, the secure
+	 * world releases the lock. For SHA (the shortcut is always installed
+	 * without the LOCK flag): do nothing.
+	 */
+ if ((rpc_command & RPC_INSTALL_SHORTCUT_LOCK_ACCELERATOR_LOCK) != 0) {
+ /*Lock the HWA */
+ tf_crypto_lock_hwa(cus->hwa_id, LOCK_HWA);
+ }
+
+ dprintk(KERN_INFO
+ "tf_crypto_install_shortcut_lock_hwa: Done\n");
+
+ return S_SUCCESS;
+}
+
+/*------------------------------------------------------------------------- */
+
+/*
+ * This RPC is used to perform one or several of the following operations
+ * - Lock one or several accelerators for exclusive use by the secure world,
+ * either because it is going to be switched to secure or because a new key is
+ * going to be loaded in the accelerator
+ * - Suspend a shortcut, i.e., make it temporarily unavailable to the public
+ * world. This is used when a secure update is going to be performed on the
+ * operation. The answer to the RPC then contains the operation state
+ * necessary for the secure world to do the update.
+ * - Uninstall the shortcut
+ */
+static int tf_crypto_lock_hwas_suspend_shortcut(
+ u32 rpc_command, void *rpc_shared_buffer)
+{
+ u32 target_shortcut;
+ struct cus_context *cus = NULL;
+ struct tf_connection *connection = NULL;
+
+	/*reference the input/output data */
+ struct rpc_lock_hwa_suspend_shortcut_out *suspend_cus_out =
+ rpc_shared_buffer;
+ struct rpc_lock_hwa_suspend_shortcut_in *suspend_cus_in =
+ rpc_shared_buffer;
+
+ dprintk(KERN_INFO
+ "tf_crypto_lock_hwas_suspend_shortcut: "\
+ "suspend_cus_in=0x%08x; shortcut_id=0x%08x\n",
+		(u32)suspend_cus_in, suspend_cus_in->shortcut_id);
+
+ target_shortcut = suspend_cus_in->shortcut_id;
+
+ /*lock HWAs */
+ tf_crypto_lock_hwas(rpc_command, LOCK_HWA);
+
+	/*if suspend_cus_in->shortcut_id != 0 and rpc_command.S != 0,
+	then suspend the shortcut */
+ if ((target_shortcut != 0) && ((rpc_command &
+ RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_SUSPEND) != 0)) {
+ /*reference the CUSContext */
+ cus = (struct cus_context *)
+ suspend_cus_in->shortcut_id;
+
+		/*preventive check: return if the shortcut does not exist */
+ connection = tf_get_device_context(cus);
+ if (connection == NULL) {
+ dprintk(KERN_INFO
+ "tf_crypto_lock_hwas_suspend_shortcut: "\
+ "shortcut_id=0x%08x does not exist, cannot suspend "\
+ "Shortcut\n",
+ suspend_cus_in->shortcut_id);
+ return -1;
+ }
+
+loop_on_suspend:
+ /*lock shortcut_list_lock associated with the
+ *device context */
+ spin_lock(&connection->shortcut_list_lock);
+
+ /*Suspend shortcut */
+ cus->suspended = true;
+
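+		/*
+		 * Wait for in-flight public updates to drain: while use_count
+		 * is non-zero, drop the lock, yield the CPU and retry.
+		 */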
+ if (cus->use_count != 0) {
+ /*release shortcut_list_lock */
+ spin_unlock(&connection->
+ shortcut_list_lock);
+ schedule();
+ goto loop_on_suspend;
+ }
+
+ /*Copy the operation state data stored in CUS Context into the
+		 *answer to the RPC output, assuming that the HWA registers
+		 *were saved at update time */
+ memcpy(&suspend_cus_out->operation_state,
+ &cus->operation_state,
+ sizeof(union tf_crypto_operation_state));
+
+		/*Uninstall the shortcut if required */
+ if ((rpc_command &
+ RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_UNINSTALL) != 0) {
+ dprintk(KERN_INFO
+ "tf_crypto_lock_hwas_suspend_shortcut:"\
+ "Uninstall 0x%08x\n",
+ target_shortcut);
+ list_del(&(cus->list));
+			/*list_del only removes the item from the list; the
+			 *memory must be freed afterwards */
+ /*release the lock before calling internal_kfree */
+ spin_unlock(&connection->
+ shortcut_list_lock);
+			internal_kfree(cus);
+ return 0;
+ }
+
+ /*release shortcut_list_lock */
+ spin_unlock(&connection->shortcut_list_lock);
+ }
+
+ return 0;
+}
+
+/*------------------------------------------------------------------------- */
+
+/*
+ * This RPC is used to perform one or several of the following operations:
+ * - Resume a shortcut previously suspended
+ * - Inform the public driver of the new keys installed in the DES and AES
+ * accelerators
+ * - Unlock some of the accelerators
+ */
+static int tf_crypto_resume_shortcut_unlock_hwas(
+ u32 rpc_command, void *rpc_shared_buffer)
+{
+ struct tf_device *dev = tf_get_device();
+ struct tf_connection *connection = NULL;
+ struct cus_context *cus = NULL;
+
+ /*reference the input data */
+ struct rpc_resume_shortcut_unlock_hwa_in *resume_cus_in =
+ rpc_shared_buffer;
+
+ dprintk(KERN_INFO
+ "tf_crypto_resume_shortcut_unlock_hwas\n"
+ "rpc_command=0x%08x\nshortcut_id=0x%08x\n",
+ rpc_command, resume_cus_in->shortcut_id);
+
+	/*if shortcut_id is not 0, resume the shortcut and unlock the HWA;
+	else only unlock the HWA */
+ if (resume_cus_in->shortcut_id != 0) {
+ /*reference the CUSContext */
+ cus = (struct cus_context *)
+ resume_cus_in->shortcut_id;
+
+		/*preventive check: return if the shortcut does not exist;
+		 *otherwise connection points to the public crypto monitor
+		 *(inside the device context) */
+ connection = tf_get_device_context(cus);
+ if (connection == NULL) {
+ dprintk(KERN_INFO
+ "tf_crypto_resume_shortcut_unlock_hwas(...):"\
+			"shortcut_id 0x%08x does not exist, cannot resume "\
+ "Shortcut\n",
+ resume_cus_in->shortcut_id);
+ return -1;
+ }
+
+		/*if the resume bit is set and the shortcut is suspended */
+ if ((cus->suspended) &&
+ ((rpc_command &
+ RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_RESUME) != 0)){
+ /*Write operation_stateData in the shortcut context */
+ memcpy(&cus->operation_state,
+ &resume_cus_in->operation_state,
+ sizeof(union tf_crypto_operation_state));
+ /*resume the shortcut */
+ cus->suspended = false;
+ }
+ }
+
+ /*
+	 * If A is set: atomically set aes1_key_context to
+	 * resume_cus_in->aes1_key_context
+ */
+ if ((rpc_command &
+ RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_UNLOCK_AES1) != 0) {
+ dev->aes1_key_context =
+ resume_cus_in->aes1_key_context;
+ }
+
+ /*
+ * If D is set:
+	 * atomically set des_key_context to resume_cus_in->des_key_context
+ */
+ if ((rpc_command &
+ RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_UNLOCK_DES) != 0) {
+ dev->des_key_context =
+ resume_cus_in->des_key_context;
+ }
+
+ /* H is never set by the PA: Atomically set sham1_is_public to true */
+ dev->sham1_is_public = true;
+
+	/* Unlock HWAs according to rpc_command */
+ tf_crypto_lock_hwas(rpc_command, UNLOCK_HWA);
+
+ return 0;
+}
+
+/*------------------------------------------------------------------------- */
+
+/*
+ * This RPC is used to notify the public driver that the key in the AES, DES
+ * accelerators has been cleared. This happens only when the key is no longer
+ * referenced by any shortcuts. So, it is guaranteed that no-one has entered the
+ * accelerators critical section and there is no need to enter it to implement
+ * this RPC.
+ */
+static int tf_crypto_clear_global_key_context(
+ u32 rpc_command, void *rpc_shared_buffer)
+{
+ struct tf_device *dev = tf_get_device();
+
+ /*
+ * If A is set: Atomically set aes1_key_context to 0
+ */
+ if ((rpc_command &
+ RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_UNLOCK_AES1) != 0) {
+ dev->aes1_key_context = 0;
+ }
+
+ /*
+ *If D is set: Atomically set des_key_context to 0
+ */
+ if ((rpc_command &
+ RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_UNLOCK_DES) != 0) {
+ dev->des_key_context = 0;
+ }
+
+ return 0;
+}
+
+/*------------------------------------------------------------------------- */
+/*
+ * Execute a public crypto related RPC
+ */
+
+int tf_crypto_execute_rpc(u32 rpc_command, void *rpc_shared_buffer)
+{
+ switch (rpc_command & RPC_CRYPTO_COMMAND_MASK) {
+ case RPC_INSTALL_SHORTCUT_LOCK_ACCELERATOR:
+ dprintk(KERN_INFO "RPC_INSTALL_SHORTCUT_LOCK_ACCELERATOR\n");
+ return tf_crypto_install_shortcut_lock_hwa(
+ rpc_command, rpc_shared_buffer);
+
+ case RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT:
+ dprintk(KERN_INFO "RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT\n");
+ return tf_crypto_lock_hwas_suspend_shortcut(
+ rpc_command, rpc_shared_buffer);
+
+ case RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS:
+ dprintk(KERN_INFO "RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS\n");
+ return tf_crypto_resume_shortcut_unlock_hwas(
+ rpc_command, rpc_shared_buffer);
+
+ case RPC_CLEAR_GLOBAL_KEY_CONTEXT:
+ dprintk(KERN_INFO "RPC_CLEAR_GLOBAL_KEY_CONTEXT\n");
+ return tf_crypto_clear_global_key_context(
+ rpc_command, rpc_shared_buffer);
+ }
+
+ return -1;
+}
diff --git a/security/smc/tf_crypto.h b/security/smc/tf_crypto.h
new file mode 100644
index 0000000..2291439
--- /dev/null
+++ b/security/smc/tf_crypto.h
@@ -0,0 +1,349 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TF_PUBLIC_CRYPTO_H
+#define __TF_PUBLIC_CRYPTO_H
+
+#include "tf_defs.h"
+#include <linux/io.h>
+#include <mach/io.h>
+
+#include <clockdomain.h>
+
+#ifdef __ASM_ARM_ARCH_OMAP_CLOCKDOMAIN_H
+#define clkdm_wakeup omap2_clkdm_wakeup
+#define clkdm_allow_idle omap2_clkdm_allow_idle
+#endif
+
+/*-------------------------------------------------------------------------- */
+
+#define PUBLIC_CRYPTO_HWA_AES1 0x1
+#define PUBLIC_CRYPTO_HWA_DES 0x4
+#define PUBLIC_CRYPTO_HWA_SHA 0x8
+
+#define OUTREG32(a, b) __raw_writel(b, a)
+#define INREG32(a) __raw_readl(a)
+#define SETREG32(x, y) OUTREG32(x, INREG32(x) | (y))
+#define CLRREG32(x, y) OUTREG32(x, INREG32(x) & ~(y))
+
+#define PUBLIC_CRYPTO_CLKSTCTRL_CLOCK_REG 0x4A009580
+#define PUBLIC_CRYPTO_AES1_CLOCK_REG 0x4A0095A0
+#define PUBLIC_CRYPTO_DES3DES_CLOCK_REG 0x4A0095B0
+#define PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG 0x4A0095C8
+
+#define BYTES_TO_LONG(a) ((u32)((a)[0] | ((a)[1] << 8) | \
+	((a)[2] << 16) | ((a)[3] << 24)))
+#define LONG_TO_BYTE(a, b) do { (a)[0] = (u8)((b) & 0xFF); \
+	(a)[1] = (u8)(((b) >> 8) & 0xFF); \
+	(a)[2] = (u8)(((b) >> 16) & 0xFF); \
+	(a)[3] = (u8)(((b) >> 24) & 0xFF); } while (0)
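+/*
+ * Example (little-endian packing): BYTES_TO_LONG on the bytes
+ * {0x12, 0x34, 0x56, 0x78} yields 0x78563412, and LONG_TO_BYTE
+ * writes them back out in the same byte order.
+ */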
+
+#define IS_4_BYTES_ALIGNED(x) (((x) & 0x3) == 0)
+
+#define TF_SMC_OMAP4_PUBLIC_DMA
+
+/*
+ * The size limit to trigger DMA for AES, DES and digest operations.
+ * 0xFFFFFFFF means "never".
+ */
+#ifdef TF_SMC_OMAP4_PUBLIC_DMA
+#define DMA_TRIGGER_IRQ_AES 128
+#define DMA_TRIGGER_IRQ_DES 128
+#define DMA_TRIGGER_IRQ_DIGEST 1024
+#else
+#define DMA_TRIGGER_IRQ_AES 0xFFFFFFFF
+#define DMA_TRIGGER_IRQ_DES 0xFFFFFFFF
+#define DMA_TRIGGER_IRQ_DIGEST 0xFFFFFFFF
+#endif
+
+/*Error code constants */
+#define PUBLIC_CRYPTO_OPERATION_SUCCESS 0x00000000
+#define PUBLIC_CRYPTO_ERR_ACCESS_DENIED 0x00000001
+#define PUBLIC_CRYPTO_ERR_OUT_OF_MEMORY 0x00000002
+#define PUBLIC_CRYPTO_ERR_BAD_PARAMETERS 0x00000003
+#define PUBLIC_CRYPTO_ERR_TIMEOUT 0x00000004
+
+/*DMA mode constants */
+#define PUBLIC_CRYPTO_DMA_USE_NONE 0x00000000 /*No DMA used*/
+/*DMA with active polling used */
+#define PUBLIC_CRYPTO_DMA_USE_POLLING 0x00000001
+#define PUBLIC_CRYPTO_DMA_USE_IRQ 0x00000002 /*DMA with IRQ used*/
+
+#define PUBLIC_CRYPTO_REG_SET_BIT(x, y) OUTREG32(x, INREG32(x) | y);
+#define PUBLIC_CRYPTO_REG_UNSET_BIT(x, y) OUTREG32(x, INREG32(x) & (~y));
+
+#define AES_BLOCK_SIZE 16
+#define DES_BLOCK_SIZE 8
+#define HASH_BLOCK_SIZE 64
+
+#define HASH_MD5_LENGTH 16
+#define HASH_SHA1_LENGTH 20
+#define HASH_SHA224_LENGTH 28
+#define HASH_SHA256_LENGTH 32
+
+#define PUBLIC_CRYPTO_DIGEST_MAX_SIZE 32
+#define PUBLIC_CRYPTO_IV_MAX_SIZE 16
+
+#define PUBLIC_CRYPTO_HW_CLOCK_ADDR (0x48004A14)
+#define PUBLIC_CRYPTO_HW_AUTOIDLE_ADDR (0x48004A34)
+
+#define PUBLIC_CRYPTO_HW_CLOCK1_ADDR (0x48004A10)
+#define PUBLIC_CRYPTO_HW_AUTOIDLE1_ADDR (0x48004A30)
+
+#define DIGEST_CTRL_ALGO_MD5 0
+#define DIGEST_CTRL_ALGO_SHA1 1
+#define DIGEST_CTRL_ALGO_SHA224 2
+#define DIGEST_CTRL_ALGO_SHA256 3
+
+/*-------------------------------------------------------------------------- */
+/*
+ *The magic word.
+ */
+#define CUS_CONTEXT_MAGIC 0x45EF683C
+
+/*-------------------------------------------------------------------------- */
+/* CUS context structure */
+/*-------------------------------------------------------------------------- */
+
+/* State of an AES operation */
+struct tf_crypto_aes_operation_state {
+ u32 AES_IV_0;
+ u32 AES_IV_1;
+ u32 AES_IV_2;
+ u32 AES_IV_3;
+
+ u32 CTRL;
+
+ /* Only used by Linux crypto API interface */
+ u32 KEY1_0;
+ u32 KEY1_1;
+ u32 KEY1_2;
+ u32 KEY1_3;
+ u32 KEY1_4;
+ u32 KEY1_5;
+ u32 KEY1_6;
+ u32 KEY1_7;
+
+ u32 key_is_public;
+};
+
+struct tf_crypto_des_operation_state {
+ u32 DES_IV_L;
+ u32 DES_IV_H;
+};
+
+#define HASH_BLOCK_BYTES_LENGTH 64
+
+struct tf_crypto_sha_operation_state {
+ /* Current digest */
+ u32 SHA_DIGEST_A;
+ u32 SHA_DIGEST_B;
+ u32 SHA_DIGEST_C;
+ u32 SHA_DIGEST_D;
+ u32 SHA_DIGEST_E;
+ u32 SHA_DIGEST_F;
+ u32 SHA_DIGEST_G;
+ u32 SHA_DIGEST_H;
+
+ /* This buffer contains a partial chunk */
+ u8 chunk_buffer[HASH_BLOCK_BYTES_LENGTH];
+
+ /* Number of bytes stored in chunk_buffer (0..64) */
+ u32 chunk_length;
+
+ /*
+ * Total number of bytes processed so far
+ * (not including the partial chunk)
+ */
+ u32 bytes_processed;
+
+ u32 CTRL;
+};
+
+union tf_crypto_operation_state {
+ struct tf_crypto_aes_operation_state aes;
+ struct tf_crypto_des_operation_state des;
+ struct tf_crypto_sha_operation_state sha;
+};
+
+/*
+ *Fully describes a public crypto operation
+ *(i.e., an operation that has a shortcut attached).
+ */
+struct cus_context {
+ /*
+ *Identifies the public crypto operation in the list of all public
+ *operations.
+ */
+ struct list_head list;
+
+	u32 magic_number;	/* Must be set to CUS_CONTEXT_MAGIC */
+
+ /*basic fields */
+ u32 client_session;
+ u32 command_id;
+ u32 hwa_id;
+ u32 hwa_ctrl;
+ u32 key_context;
+ union tf_crypto_operation_state operation_state;
+ u32 use_count;
+ bool suspended;
+};
+
+struct cus_params {
+ /*fields for data processing of an update command */
+ u32 input_data_length;
+ u8 *input_data;
+ struct tf_shmem_desc *input_shmem;
+
+ u32 output_data_length;
+ u8 *output_data;
+ struct tf_shmem_desc *output_shmem;
+};
+
+/*-------------------------------------------------------------------------- */
+/*
+ *Public crypto API (Top level)
+ */
+
+/*
+ * Initialize the public crypto DMA channels and global HWA semaphores
+ */
+u32 tf_crypto_init(void);
+
+/*
+ *Initialize the device context CUS fields
+ *(shortcut semaphore and public CUS list)
+ */
+void tf_crypto_init_cus(struct tf_connection *connection);
+
+/**
+ *Terminate the public crypto (including DMA)
+ */
+void tf_crypto_terminate(void);
+
+int tf_crypto_try_shortcuted_update(struct tf_connection *connection,
+ struct tf_command_invoke_client_command *command,
+ struct tf_answer_invoke_client_command *answer);
+
+int tf_crypto_execute_rpc(u32 rpc_command, void *rpc_shared_buffer);
+
+/*-------------------------------------------------------------------------- */
+/*
+ *Helper methods
+ */
+u32 tf_crypto_wait_for_ready_bit(u32 *reg, u32 bit);
+void tf_crypto_wait_for_ready_bit_infinitely(u32 *reg, u32 bit);
+
+void tf_crypto_enable_clock(uint32_t clock_paddr);
+void tf_crypto_disable_clock(uint32_t clock_paddr);
+
+#define LOCK_HWA true
+#define UNLOCK_HWA false
+
+void tf_crypto_lock_hwa(u32 hwa_id, bool do_lock);
+
+/*---------------------------------------------------------------------------*/
+/* AES operations */
+/*---------------------------------------------------------------------------*/
+
+void tf_aes_init(void);
+void tf_aes_exit(void);
+
+#ifdef CONFIG_SMC_KERNEL_CRYPTO
+int register_smc_public_crypto_aes(void);
+void unregister_smc_public_crypto_aes(void);
+#else
+static inline int register_smc_public_crypto_aes(void)
+{
+ return 0;
+}
+
+static inline void unregister_smc_public_crypto_aes(void) {}
+#endif
+
+/**
+ *This function performs an AES update operation.
+ *
+ *The AES1 accelerator is assumed loaded with the correct key
+ *
+ *AES_CTRL: defines the mode and direction
+ *aes_state: defines the operation IV
+ *src: Input buffer to process.
+ *dest: Output buffer containing the processed data.
+ *
+ *nb_blocks: number of block(s) to process.
+ */
+bool tf_aes_update(struct tf_crypto_aes_operation_state *aes_state,
+ u8 *src, u8 *dest, u32 nb_blocks);
+
+/*---------------------------------------------------------------------------*/
+/* DES/DES3 operations */
+/*---------------------------------------------------------------------------*/
+
+void tf_des_init(void);
+void tf_des_exit(void);
+
+/**
+ *This function performs a DES update operation.
+ *
+ *The DES accelerator is assumed loaded with the correct key
+ *
+ *DES_CTRL: defines the mode and direction
+ *des_state: defines the operation IV
+ *src: Input buffer to process.
+ *dest: Output buffer containing the processed data.
+ *nb_blocks: Number of block(s) to process.
+ */
+bool tf_des_update(u32 DES_CTRL,
+ struct tf_crypto_des_operation_state *des_state,
+ u8 *src, u8 *dest, u32 nb_blocks);
+
+/*---------------------------------------------------------------------------*/
+/* Digest operations */
+/*---------------------------------------------------------------------------*/
+
+void tf_digest_init(void);
+void tf_digest_exit(void);
+
+#ifdef CONFIG_SMC_KERNEL_CRYPTO
+int register_smc_public_crypto_digest(void);
+void unregister_smc_public_crypto_digest(void);
+#else
+static inline int register_smc_public_crypto_digest(void)
+{
+ return 0;
+}
+
+static inline void unregister_smc_public_crypto_digest(void) {}
+#endif
+
+/**
+ *This function performs a HASH update operation.
+ *
+ *SHA_CTRL: defines the algorithm
+ *sha_state: State of the operation
+ *data: Input buffer to process
+ *data_length: Length in bytes of the input buffer.
+ */
+bool tf_digest_update(
+ struct tf_crypto_sha_operation_state *sha_state,
+ u8 *data, u32 data_length);
+
+#endif /*__TF_PUBLIC_CRYPTO_H */
diff --git a/security/smc/tf_crypto_aes.c b/security/smc/tf_crypto_aes.c
new file mode 100644
index 0000000..36dc522
--- /dev/null
+++ b/security/smc/tf_crypto_aes.c
@@ -0,0 +1,1380 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include "tf_defs.h"
+#include "tf_util.h"
+#include "tf_crypto.h"
+#include "tf_dma.h"
+#include "tf_zebra.h"
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <crypto/algapi.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/aes.h>
+#include <mach/io.h>
+
+/*
+ *AES Hardware Accelerator: Base address
+ */
+#define AES1_REGS_HW_ADDR 0x4B501000
+
+/*
+ *CTRL register Masks
+ */
+#define AES_CTRL_OUTPUT_READY_BIT (1<<0)
+#define AES_CTRL_INPUT_READY_BIT (1<<1)
+
+#define AES_CTRL_GET_DIRECTION(x) ((x) & 4)
+#define AES_CTRL_DIRECTION_DECRYPT 0
+#define AES_CTRL_DIRECTION_ENCRYPT (1<<2)
+
+#define AES_CTRL_GET_KEY_SIZE(x) (x & 0x18)
+#define AES_CTRL_KEY_SIZE_128 0x08
+#define AES_CTRL_KEY_SIZE_192 0x10
+#define AES_CTRL_KEY_SIZE_256 0x18
+
+#define AES_CTRL_GET_MODE(x) ((x & 0x60) >> 5)
+#define AES_CTRL_IS_MODE_CBC(x) (AES_CTRL_GET_MODE(x) == 1)
+#define AES_CTRL_IS_MODE_ECB(x) (AES_CTRL_GET_MODE(x) == 0)
+#define AES_CTRL_IS_MODE_CTR(x) ((AES_CTRL_GET_MODE(x) == 2) || \
+ (AES_CTRL_GET_MODE(x) == 3))
+#define AES_CTRL_MODE_CBC_BIT 0x20
+#define AES_CTRL_MODE_ECB_BIT 0
+#define AES_CTRL_MODE_CTR_BIT 0x40
+
+#define AES_CTRL_GET_CTR_WIDTH(x) ((x) & 0x180)
+#define AES_CTRL_CTR_WIDTH_32 0
+#define AES_CTRL_CTR_WIDTH_64 0x80
+#define AES_CTRL_CTR_WIDTH_96 0x100
+#define AES_CTRL_CTR_WIDTH_128 0x180
+
+/*
+ * SYSCONFIG register masks
+ */
+#define AES_SYSCONFIG_DMA_REQ_IN_EN_BIT (1 << 5)
+#define AES_SYSCONFIG_DMA_REQ_OUT_EN_BIT (1 << 6)
+
+
+/*----------------------------------------------------------------------*/
+/* AES Context */
+/*----------------------------------------------------------------------*/
+/**
+ *This structure contains the registers of the AES HW accelerator.
+ */
+struct aes_reg {
+ u32 AES_KEY2_6; /* 0x00 */
+	u32 AES_KEY2_7; /* 0x04 */
+ u32 AES_KEY2_4; /* 0x08 */
+ u32 AES_KEY2_5; /* 0x0C */
+ u32 AES_KEY2_2; /* 0x10 */
+ u32 AES_KEY2_3; /* 0x14 */
+ u32 AES_KEY2_0; /* 0x18 */
+ u32 AES_KEY2_1; /* 0x1C */
+
+ u32 AES_KEY1_6; /* 0x20 */
+ u32 AES_KEY1_7; /* 0x24 */
+ u32 AES_KEY1_4; /* 0x28 */
+ u32 AES_KEY1_5; /* 0x2C */
+ u32 AES_KEY1_2; /* 0x30 */
+ u32 AES_KEY1_3; /* 0x34 */
+ u32 AES_KEY1_0; /* 0x38 */
+ u32 AES_KEY1_1; /* 0x3C */
+
+ u32 AES_IV_IN_0; /* 0x40 */
+ u32 AES_IV_IN_1; /* 0x44 */
+ u32 AES_IV_IN_2; /* 0x48 */
+ u32 AES_IV_IN_3; /* 0x4C */
+
+ u32 AES_CTRL; /* 0x50 */
+
+ u32 AES_C_LENGTH_0; /* 0x54 */
+ u32 AES_C_LENGTH_1; /* 0x58 */
+ u32 AES_AUTH_LENGTH; /* 0x5C */
+
+ u32 AES_DATA_IN_0; /* 0x60 */
+ u32 AES_DATA_IN_1; /* 0x64 */
+ u32 AES_DATA_IN_2; /* 0x68 */
+ u32 AES_DATA_IN_3; /* 0x6C */
+
+ u32 AES_TAG_OUT_0; /* 0x70 */
+ u32 AES_TAG_OUT_1; /* 0x74 */
+ u32 AES_TAG_OUT_2; /* 0x78 */
+ u32 AES_TAG_OUT_3; /* 0x7C */
+
+ u32 AES_REVISION; /* 0x80 */
+ u32 AES_SYSCONFIG; /* 0x84 */
+
+ u32 AES_SYSSTATUS; /* 0x88 */
+
+};
+static struct aes_reg *paes_reg;
+
+#ifdef CONFIG_SMC_KERNEL_CRYPTO
+#define FLAGS_FAST BIT(7)
+#define FLAGS_BUSY 8
+
+struct aes_hwa_ctx {
+ unsigned long flags;
+
+ spinlock_t lock;
+ struct crypto_queue queue;
+
+ struct tasklet_struct task;
+
+ struct ablkcipher_request *req;
+ size_t total;
+ struct scatterlist *in_sg;
+ size_t in_offset;
+ struct scatterlist *out_sg;
+ size_t out_offset;
+
+ size_t buflen;
+ void *buf_in;
+ size_t dma_size;
+ int dma_in;
+ int dma_lch_in;
+ dma_addr_t dma_addr_in;
+ void *buf_out;
+ int dma_out;
+ int dma_lch_out;
+ dma_addr_t dma_addr_out;
+
+ struct tf_crypto_aes_operation_state *ctx;
+};
+static struct aes_hwa_ctx *aes_ctx;
+#endif
+
+/*---------------------------------------------------------------------------
+ *Forward declarations
+ *------------------------------------------------------------------------- */
+
+static bool tf_aes_update_dma(u8 *src, u8 *dest, u32 nb_blocks, bool is_kernel);
+
+/*----------------------------------------------------------------------------
+ *Save HWA registers into the specified operation state structure
+ *--------------------------------------------------------------------------*/
+static void tf_aes_save_registers(
+ struct tf_crypto_aes_operation_state *aes_state)
+{
+ dprintk(KERN_INFO "tf_aes_save_registers: "
+ "aes_state(%p) <- paes_reg(%p): CTRL=0x%08x\n",
+ aes_state, paes_reg, aes_state->CTRL);
+
+ /*Save the IV if we are in CBC or CTR mode (not required for ECB) */
+ if (!AES_CTRL_IS_MODE_ECB(aes_state->CTRL)) {
+ aes_state->AES_IV_0 = INREG32(&paes_reg->AES_IV_IN_0);
+ aes_state->AES_IV_1 = INREG32(&paes_reg->AES_IV_IN_1);
+ aes_state->AES_IV_2 = INREG32(&paes_reg->AES_IV_IN_2);
+ aes_state->AES_IV_3 = INREG32(&paes_reg->AES_IV_IN_3);
+ }
+}
+
+/*----------------------------------------------------------------------------
+ *Restore the HWA registers from the operation state structure
+ *---------------------------------------------------------------------------*/
+static void tf_aes_restore_registers(
+ struct tf_crypto_aes_operation_state *aes_state)
+{
+ struct tf_device *dev = tf_get_device();
+
+ dprintk(KERN_INFO "tf_aes_restore_registers: "
+ "paes_reg(%p) <- aes_state(%p): CTRL=0x%08x\n",
+ paes_reg, aes_state, aes_state->CTRL);
+
+ if (aes_state->key_is_public) {
+ OUTREG32(&paes_reg->AES_KEY1_0, aes_state->KEY1_0);
+ OUTREG32(&paes_reg->AES_KEY1_1, aes_state->KEY1_1);
+ OUTREG32(&paes_reg->AES_KEY1_2, aes_state->KEY1_2);
+ OUTREG32(&paes_reg->AES_KEY1_3, aes_state->KEY1_3);
+ OUTREG32(&paes_reg->AES_KEY1_4, aes_state->KEY1_4);
+ OUTREG32(&paes_reg->AES_KEY1_5, aes_state->KEY1_5);
+ OUTREG32(&paes_reg->AES_KEY1_6, aes_state->KEY1_6);
+ OUTREG32(&paes_reg->AES_KEY1_7, aes_state->KEY1_7);
+
+ /*
+ * Make sure a potential secure key that has been overwritten by
+ * the previous code is reinstalled before performing other
+ * public crypto operations.
+ */
+ dev->aes1_key_context = 0;
+ } else {
+ aes_state->CTRL |= INREG32(&paes_reg->AES_CTRL);
+ }
+
+ /*
+ * Restore the IV first if we are in CBC or CTR mode
+ * (not required for ECB)
+ */
+ if (!AES_CTRL_IS_MODE_ECB(aes_state->CTRL)) {
+ OUTREG32(&paes_reg->AES_IV_IN_0, aes_state->AES_IV_0);
+ OUTREG32(&paes_reg->AES_IV_IN_1, aes_state->AES_IV_1);
+ OUTREG32(&paes_reg->AES_IV_IN_2, aes_state->AES_IV_2);
+ OUTREG32(&paes_reg->AES_IV_IN_3, aes_state->AES_IV_3);
+ }
+
+	/* Then set the CTRL register:
+	 * only overwrite CTRL when needed, because doing so unconditionally
+	 * breaks the HWA processing (observed by experimentation)
+	 */
+
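+	/*
+	 * CTRL bit layout, per the masks defined above: bit 2 = direction,
+	 * bits 3-4 = key size, bits 5-6 = mode, bits 7-8 = CTR width.
+	 */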
+ aes_state->CTRL = (aes_state->CTRL & (3 << 3)) /* key size */
+ | (aes_state->CTRL & ((1 << 2) | (1 << 5) | (1 << 6)))
+ | (0x3 << 7) /* Always set CTR_WIDTH to 128-bit */;
+
+ if ((aes_state->CTRL & 0x1FC) !=
+ (INREG32(&paes_reg->AES_CTRL) & 0x1FC))
+ OUTREG32(&paes_reg->AES_CTRL, aes_state->CTRL & 0x1FC);
+
+ /* Set the SYSCONFIG register to 0 */
+ OUTREG32(&paes_reg->AES_SYSCONFIG, 0);
+}
+
+/*-------------------------------------------------------------------------- */
+
+void tf_aes_init(void)
+{
+ paes_reg = omap_ioremap(AES1_REGS_HW_ADDR, SZ_1M, MT_DEVICE);
+ if (paes_reg == NULL)
+ panic("Unable to remap AES1 module");
+}
+
+void tf_aes_exit(void)
+{
+ omap_iounmap(paes_reg);
+}
+
+bool tf_aes_update(struct tf_crypto_aes_operation_state *aes_state,
+ u8 *src, u8 *dest, u32 nb_blocks)
+{
+ u32 nbr_of_blocks;
+ u32 temp;
+ u8 *process_src;
+ u8 *process_dest;
+ u32 dma_use = PUBLIC_CRYPTO_DMA_USE_NONE;
+ bool is_kernel = false;
+
+ /*
+ *Choice of the processing type
+ */
+ if (nb_blocks * AES_BLOCK_SIZE >= DMA_TRIGGER_IRQ_AES)
+ dma_use = PUBLIC_CRYPTO_DMA_USE_IRQ;
+
+ dprintk(KERN_INFO "tf_aes_update: "
+ "src=0x%08x, dest=0x%08x, nb_blocks=0x%08x, dma_use=0x%08x\n",
+ (unsigned int)src,
+ (unsigned int)dest,
+ (unsigned int)nb_blocks,
+ (unsigned int)dma_use);
+
+ if (aes_state->key_is_public)
+ is_kernel = true;
+
+ if (nb_blocks == 0) {
+ dprintk(KERN_INFO "tf_aes_update: Nothing to process\n");
+ return true;
+ }
+
+ if ((AES_CTRL_GET_DIRECTION(INREG32(&paes_reg->AES_CTRL)) !=
+ AES_CTRL_GET_DIRECTION(aes_state->CTRL)) &&
+ !aes_state->key_is_public) {
+ dprintk(KERN_WARNING "HWA configured for another direction\n");
+ return false;
+ }
+
+ /*Restore the registers of the accelerator from the operation state */
+ tf_aes_restore_registers(aes_state);
+
+ if (dma_use == PUBLIC_CRYPTO_DMA_USE_IRQ) {
+ /* Perform the update with DMA */
+ if (!tf_aes_update_dma(src, dest, nb_blocks, is_kernel))
+ return false;
+ } else {
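+		/*
+		 * A stack buffer is safe here: the non-DMA path only
+		 * handles requests below DMA_TRIGGER_IRQ_AES bytes.
+		 */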
+ u8 buf[DMA_TRIGGER_IRQ_AES];
+
+ /*
+ * Synchronous Linux crypto API buffers are mapped in kernel
+ * space
+ */
+
+ if (is_kernel) {
+ process_src = src;
+ process_dest = dest;
+ } else {
+ if (copy_from_user(buf, src,
+ nb_blocks * AES_BLOCK_SIZE))
+ return false;
+
+ process_src = process_dest = buf;
+ }
+
+ for (nbr_of_blocks = 0;
+ nbr_of_blocks < nb_blocks; nbr_of_blocks++) {
+
+			/*
+			 * Wait for the input to be ready; crash the system
+			 * if this takes too long, as it should never happen
+			 */
+ if (tf_crypto_wait_for_ready_bit(
+ (u32 *)&paes_reg->AES_CTRL,
+ AES_CTRL_INPUT_READY_BIT) !=
+ PUBLIC_CRYPTO_OPERATION_SUCCESS)
+ panic("Wait too long for AES hardware "
+ "accelerator Input data to be ready\n");
+
+ /* We copy the 16 bytes of data src->reg */
+ temp = (u32) BYTES_TO_LONG(process_src);
+ OUTREG32(&paes_reg->AES_DATA_IN_0, temp);
+ process_src += 4;
+ temp = (u32) BYTES_TO_LONG(process_src);
+ OUTREG32(&paes_reg->AES_DATA_IN_1, temp);
+ process_src += 4;
+ temp = (u32) BYTES_TO_LONG(process_src);
+ OUTREG32(&paes_reg->AES_DATA_IN_2, temp);
+ process_src += 4;
+ temp = (u32) BYTES_TO_LONG(process_src);
+ OUTREG32(&paes_reg->AES_DATA_IN_3, temp);
+ process_src += 4;
+
+ /* We wait for the output ready */
+ tf_crypto_wait_for_ready_bit_infinitely(
+ (u32 *)&paes_reg->AES_CTRL,
+ AES_CTRL_OUTPUT_READY_BIT);
+
+ /* We copy the 16 bytes of data reg->dest */
+ temp = INREG32(&paes_reg->AES_DATA_IN_0);
+ LONG_TO_BYTE(process_dest, temp);
+ process_dest += 4;
+ temp = INREG32(&paes_reg->AES_DATA_IN_1);
+ LONG_TO_BYTE(process_dest, temp);
+ process_dest += 4;
+ temp = INREG32(&paes_reg->AES_DATA_IN_2);
+ LONG_TO_BYTE(process_dest, temp);
+ process_dest += 4;
+ temp = INREG32(&paes_reg->AES_DATA_IN_3);
+ LONG_TO_BYTE(process_dest, temp);
+ process_dest += 4;
+ }
+
+ if (!is_kernel)
+ if (copy_to_user(dest, buf,
+ nb_blocks * AES_BLOCK_SIZE))
+ return false;
+ }
+
+ /* Save the accelerator registers into the operation state */
+ tf_aes_save_registers(aes_state);
+
+ dprintk(KERN_INFO "tf_aes_update: Done\n");
+
+ return true;
+}
+
+/*-------------------------------------------------------------------------- */
+/*
+ * Static function, performs AES encryption/decryption using DMA for the
+ * data transfer.
+ *
+ * inputs: src : pointer to the input data to process
+ *         nb_blocks : number of blocks to process
+ *         is_kernel : true if src/dest are kernel-space pointers
+ * output: dest : pointer to the output data (can be equal to src)
+ */
+static bool tf_aes_update_dma(u8 *src, u8 *dest, u32 nb_blocks, bool is_kernel)
+{
+ /*
+ *Note: The DMA only sees physical addresses !
+ */
+
+ int dma_ch0;
+ int dma_ch1;
+ struct omap_dma_channel_params ch0_parameters;
+ struct omap_dma_channel_params ch1_parameters;
+ u32 length = nb_blocks * AES_BLOCK_SIZE;
+ u32 length_loop = 0;
+ u32 nb_blocksLoop = 0;
+ struct tf_device *dev = tf_get_device();
+
+ dprintk(KERN_INFO
+ "%s: In=0x%08x, Out=0x%08x, Len=%u\n",
+ __func__,
+ (unsigned int)src,
+ (unsigned int)dest,
+ (unsigned int)length);
+
+	/* Lock the DMA; spin rather than sleep while waiting */
+ while (!mutex_trylock(&dev->sm.dma_mutex))
+ cpu_relax();
+
+ if (tf_dma_request(&dma_ch0) != PUBLIC_CRYPTO_OPERATION_SUCCESS) {
+ mutex_unlock(&dev->sm.dma_mutex);
+ return false;
+ }
+ if (tf_dma_request(&dma_ch1) != PUBLIC_CRYPTO_OPERATION_SUCCESS) {
+ omap_free_dma(dma_ch0);
+ mutex_unlock(&dev->sm.dma_mutex);
+ return false;
+ }
+
+ while (length > 0) {
+
+		/*
+		 * At this time, we are sure that the DMA channels are
+		 * available and not used by another public crypto operation
+		 */
+
+ /*DMA used for Input and Output */
+ OUTREG32(&paes_reg->AES_SYSCONFIG,
+ INREG32(&paes_reg->AES_SYSCONFIG)
+ | AES_SYSCONFIG_DMA_REQ_OUT_EN_BIT
+ | AES_SYSCONFIG_DMA_REQ_IN_EN_BIT);
+
+ /*check length */
+ if (length <= dev->dma_buffer_length)
+ length_loop = length;
+ else
+ length_loop = dev->dma_buffer_length;
+
+ /*The length is always a multiple of the block size */
+ nb_blocksLoop = length_loop / AES_BLOCK_SIZE;
+
+		/*
+		 * Copy the data from the user input buffer into a
+		 * preallocated buffer which has the correct properties for
+		 * efficient DMA transfers.
+		 */
+ if (!is_kernel) {
+ if (copy_from_user(
+ dev->dma_buffer, src, length_loop)) {
+ omap_free_dma(dma_ch0);
+ omap_free_dma(dma_ch1);
+ mutex_unlock(&dev->sm.dma_mutex);
+ return false;
+ }
+ } else {
+ memcpy(dev->dma_buffer, src, length_loop);
+ }
+
+ /*DMA1: Mem -> AES */
+ tf_dma_set_channel_common_params(&ch0_parameters,
+ nb_blocksLoop,
+ DMA_CEN_Elts_per_Frame_AES,
+ AES1_REGS_HW_ADDR + 0x60,
+ (u32)dev->dma_buffer_phys,
+ OMAP44XX_DMA_AES1_P_DATA_IN_REQ);
+
+ ch0_parameters.src_amode = OMAP_DMA_AMODE_POST_INC;
+ ch0_parameters.dst_amode = OMAP_DMA_AMODE_CONSTANT;
+ ch0_parameters.src_or_dst_synch = OMAP_DMA_DST_SYNC;
+
+ dprintk(KERN_INFO "%s: omap_set_dma_params(ch0)\n", __func__);
+ omap_set_dma_params(dma_ch0, &ch0_parameters);
+
+ omap_set_dma_src_burst_mode(dma_ch0, OMAP_DMA_DATA_BURST_8);
+ omap_set_dma_dest_burst_mode(dma_ch0, OMAP_DMA_DATA_BURST_8);
+ omap_set_dma_src_data_pack(dma_ch0, 1);
+
+ /*DMA2: AES -> Mem */
+ tf_dma_set_channel_common_params(&ch1_parameters,
+ nb_blocksLoop,
+ DMA_CEN_Elts_per_Frame_AES,
+ (u32)dev->dma_buffer_phys,
+ AES1_REGS_HW_ADDR + 0x60,
+ OMAP44XX_DMA_AES1_P_DATA_OUT_REQ);
+
+ ch1_parameters.src_amode = OMAP_DMA_AMODE_CONSTANT;
+ ch1_parameters.dst_amode = OMAP_DMA_AMODE_POST_INC;
+ ch1_parameters.src_or_dst_synch = OMAP_DMA_SRC_SYNC;
+
+ dprintk(KERN_INFO "%s: omap_set_dma_params(ch1)\n", __func__);
+ omap_set_dma_params(dma_ch1, &ch1_parameters);
+
+ omap_set_dma_src_burst_mode(dma_ch1, OMAP_DMA_DATA_BURST_8);
+ omap_set_dma_dest_burst_mode(dma_ch1, OMAP_DMA_DATA_BURST_8);
+ omap_set_dma_dest_data_pack(dma_ch1, 1);
+
+ wmb();
+
+ dprintk(KERN_INFO
+ "%s: Start DMA channel %d\n",
+ __func__, (unsigned int)dma_ch1);
+ tf_dma_start(dma_ch1, OMAP_DMA_BLOCK_IRQ);
+ dprintk(KERN_INFO
+ "%s: Start DMA channel %d\n",
+ __func__, (unsigned int)dma_ch0);
+ tf_dma_start(dma_ch0, OMAP_DMA_BLOCK_IRQ);
+
+ dprintk(KERN_INFO
+ "%s: Waiting for IRQ\n", __func__);
+ tf_dma_wait(2);
+
+ /*Unset DMA synchronisation requests */
+ OUTREG32(&paes_reg->AES_SYSCONFIG,
+ INREG32(&paes_reg->AES_SYSCONFIG)
+ & (~AES_SYSCONFIG_DMA_REQ_OUT_EN_BIT)
+ & (~AES_SYSCONFIG_DMA_REQ_IN_EN_BIT));
+
+ omap_clear_dma(dma_ch0);
+ omap_clear_dma(dma_ch1);
+
+		/*
+		 * The DMA transfer is complete.
+		 *
+		 * The DMA output is in the preallocated aligned buffer
+		 * and needs to be copied to the output buffer.
+		 */
+ if (!is_kernel) {
+ if (copy_to_user(
+ dest, dev->dma_buffer, length_loop)) {
+ omap_free_dma(dma_ch0);
+ omap_free_dma(dma_ch1);
+ mutex_unlock(&dev->sm.dma_mutex);
+ return false;
+ }
+ } else {
+ memcpy(dest, dev->dma_buffer, length_loop);
+ }
+
+ src += length_loop;
+ dest += length_loop;
+ length -= length_loop;
+ }
+
+ /*For safety reasons, let's clean the working buffer */
+ memset(dev->dma_buffer, 0, length_loop);
+
+ /*release the DMA */
+ omap_free_dma(dma_ch0);
+ omap_free_dma(dma_ch1);
+
+ mutex_unlock(&dev->sm.dma_mutex);
+
+ dprintk(KERN_INFO "%s: Success\n", __func__);
+
+ return true;
+}
+
+#ifdef CONFIG_SMC_KERNEL_CRYPTO
+/*
+ * AES HWA registration into kernel crypto framework
+ */
+
+static void sg_copy_buf(void *buf, struct scatterlist *sg,
+ unsigned int start, unsigned int nbytes, int out)
+{
+ struct scatter_walk walk;
+
+ if (!nbytes)
+ return;
+
+ scatterwalk_start(&walk, sg);
+ scatterwalk_advance(&walk, start);
+ scatterwalk_copychunks(buf, &walk, nbytes, out);
+ scatterwalk_done(&walk, out, 0);
+}
+
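+/*
+ * Copy up to buflen bytes (bounded by total) between buf and the
+ * scatterlist, advancing *sg and *offset as entries are consumed.
+ * Returns the number of bytes actually copied.
+ */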
+static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
+ size_t buflen, size_t total, int out)
+{
+ unsigned int count, off = 0;
+
+ while (buflen && total) {
+ count = min((*sg)->length - *offset, total);
+ count = min(count, buflen);
+
+ if (!count)
+ return off;
+
+ sg_copy_buf(buf + off, *sg, *offset, count, out);
+
+ off += count;
+ buflen -= count;
+ *offset += count;
+ total -= count;
+
+ if (*offset == (*sg)->length) {
+ *sg = sg_next(*sg);
+ if (*sg)
+ *offset = 0;
+ else
+ total = 0;
+ }
+ }
+
+ return off;
+}
+
+static int aes_dma_start(struct aes_hwa_ctx *ctx)
+{
+ int err, fast = 0, in, out;
+ size_t count;
+ dma_addr_t addr_in, addr_out;
+ struct omap_dma_channel_params dma_params;
+ struct tf_crypto_aes_operation_state *state =
+ crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(ctx->req));
+
+ if (sg_is_last(ctx->in_sg) && sg_is_last(ctx->out_sg)) {
+ in = IS_ALIGNED((u32)ctx->in_sg->offset, sizeof(u32));
+ out = IS_ALIGNED((u32)ctx->out_sg->offset, sizeof(u32));
+
+ fast = in && out;
+ }
+
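+	/*
+	 * Fast path: map the scatterlists directly for DMA. Slow path:
+	 * stage the data through the preallocated coherent buffers.
+	 */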
+ if (fast) {
+ count = min(ctx->total, sg_dma_len(ctx->in_sg));
+ count = min(count, sg_dma_len(ctx->out_sg));
+
+ if (count != ctx->total)
+ return -EINVAL;
+
+ err = dma_map_sg(NULL, ctx->in_sg, 1, DMA_TO_DEVICE);
+ if (!err)
+ return -EINVAL;
+
+ err = dma_map_sg(NULL, ctx->out_sg, 1, DMA_FROM_DEVICE);
+ if (!err) {
+ dma_unmap_sg(NULL, ctx->in_sg, 1, DMA_TO_DEVICE);
+ return -EINVAL;
+ }
+
+ addr_in = sg_dma_address(ctx->in_sg);
+ addr_out = sg_dma_address(ctx->out_sg);
+
+ ctx->flags |= FLAGS_FAST;
+ } else {
+ count = sg_copy(&ctx->in_sg, &ctx->in_offset, ctx->buf_in,
+ ctx->buflen, ctx->total, 0);
+
+ addr_in = ctx->dma_addr_in;
+ addr_out = ctx->dma_addr_out;
+
+ ctx->flags &= ~FLAGS_FAST;
+ }
+
+ ctx->total -= count;
+
+ tf_crypto_lock_hwa(PUBLIC_CRYPTO_HWA_AES1, LOCK_HWA);
+
+ /* Configure HWA */
+ tf_crypto_enable_clock(PUBLIC_CRYPTO_AES1_CLOCK_REG);
+
+ tf_aes_restore_registers(state);
+
+ OUTREG32(&paes_reg->AES_SYSCONFIG, INREG32(&paes_reg->AES_SYSCONFIG)
+ | AES_SYSCONFIG_DMA_REQ_OUT_EN_BIT
+ | AES_SYSCONFIG_DMA_REQ_IN_EN_BIT);
+
+ ctx->dma_size = count;
+ if (!fast)
+ dma_sync_single_for_device(NULL, addr_in, count,
+ DMA_TO_DEVICE);
+
+ dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
+ dma_params.frame_count = count / AES_BLOCK_SIZE;
+ dma_params.elem_count = DMA_CEN_Elts_per_Frame_AES;
+ dma_params.src_ei = 0;
+ dma_params.src_fi = 0;
+ dma_params.dst_ei = 0;
+ dma_params.dst_fi = 0;
+ dma_params.sync_mode = OMAP_DMA_SYNC_FRAME;
+
+ /* IN */
+ dma_params.trigger = ctx->dma_in;
+ dma_params.src_or_dst_synch = OMAP_DMA_DST_SYNC;
+ dma_params.dst_start = AES1_REGS_HW_ADDR + 0x60;
+ dma_params.dst_amode = OMAP_DMA_AMODE_CONSTANT;
+ dma_params.src_start = addr_in;
+ dma_params.src_amode = OMAP_DMA_AMODE_POST_INC;
+
+ omap_set_dma_params(ctx->dma_lch_in, &dma_params);
+
+ omap_set_dma_dest_burst_mode(ctx->dma_lch_in, OMAP_DMA_DATA_BURST_8);
+ omap_set_dma_src_burst_mode(ctx->dma_lch_in, OMAP_DMA_DATA_BURST_8);
+ omap_set_dma_src_data_pack(ctx->dma_lch_in, 1);
+
+ /* OUT */
+ dma_params.trigger = ctx->dma_out;
+ dma_params.src_or_dst_synch = OMAP_DMA_SRC_SYNC;
+ dma_params.src_start = AES1_REGS_HW_ADDR + 0x60;
+ dma_params.src_amode = OMAP_DMA_AMODE_CONSTANT;
+ dma_params.dst_start = addr_out;
+ dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC;
+
+ omap_set_dma_params(ctx->dma_lch_out, &dma_params);
+
+ omap_set_dma_dest_burst_mode(ctx->dma_lch_out, OMAP_DMA_DATA_BURST_8);
+ omap_set_dma_src_burst_mode(ctx->dma_lch_out, OMAP_DMA_DATA_BURST_8);
+ omap_set_dma_dest_data_pack(ctx->dma_lch_out, 1);
+
+ /* Is this really needed? */
+ omap_disable_dma_irq(ctx->dma_lch_in, OMAP_DMA_DROP_IRQ);
+ omap_enable_dma_irq(ctx->dma_lch_in, OMAP_DMA_BLOCK_IRQ);
+ omap_disable_dma_irq(ctx->dma_lch_out, OMAP_DMA_DROP_IRQ);
+ omap_enable_dma_irq(ctx->dma_lch_out, OMAP_DMA_BLOCK_IRQ);
+
+ wmb();
+
+ omap_start_dma(ctx->dma_lch_in);
+ omap_start_dma(ctx->dma_lch_out);
+
+ return 0;
+}
+
+static int aes_dma_stop(struct aes_hwa_ctx *ctx)
+{
+ struct tf_crypto_aes_operation_state *state =
+ crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(ctx->req));
+ int err = 0;
+ size_t count;
+
+ dprintk(KERN_INFO "aes_dma_stop(%p)\n", ctx);
+
+ tf_aes_save_registers(state);
+
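+	/* For chained modes, return the updated IV to the request */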
+ if (!AES_CTRL_IS_MODE_ECB(state->CTRL)) {
+ u32 *ptr = (u32 *) ctx->req->info;
+
+ ptr[0] = state->AES_IV_0;
+ ptr[1] = state->AES_IV_1;
+ ptr[2] = state->AES_IV_2;
+ ptr[3] = state->AES_IV_3;
+ }
+
+ OUTREG32(&paes_reg->AES_SYSCONFIG, 0);
+
+ tf_crypto_disable_clock(PUBLIC_CRYPTO_AES1_CLOCK_REG);
+
+ tf_crypto_lock_hwa(PUBLIC_CRYPTO_HWA_AES1, UNLOCK_HWA);
+
+ omap_stop_dma(ctx->dma_lch_in);
+ omap_stop_dma(ctx->dma_lch_out);
+
+ if (ctx->flags & FLAGS_FAST) {
+ dma_unmap_sg(NULL, ctx->out_sg, 1, DMA_FROM_DEVICE);
+ dma_unmap_sg(NULL, ctx->in_sg, 1, DMA_TO_DEVICE);
+ } else {
+ dma_sync_single_for_device(NULL, ctx->dma_addr_out,
+ ctx->dma_size, DMA_FROM_DEVICE);
+
+ /* Copy data */
+ count = sg_copy(&ctx->out_sg, &ctx->out_offset, ctx->buf_out,
+ ctx->buflen, ctx->dma_size, 1);
+ if (count != ctx->dma_size)
+ err = -EINVAL;
+ }
+
+ if (err || !ctx->total)
+ ctx->req->base.complete(&ctx->req->base, err);
+
+ return err;
+}
+
+static void aes_dma_callback(int lch, u16 ch_status, void *data)
+{
+ struct aes_hwa_ctx *ctx = data;
+
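+	/* Only the output channel completion ends the whole transfer */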
+ if (lch == ctx->dma_lch_out)
+ tasklet_schedule(&ctx->task);
+}
+
+static int aes_dma_init(struct aes_hwa_ctx *ctx)
+{
+ int err = -ENOMEM;
+
+ ctx->dma_lch_out = -1;
+ ctx->dma_lch_in = -1;
+
+ ctx->buflen = PAGE_SIZE;
+ ctx->buflen &= ~(AES_BLOCK_SIZE - 1);
+
+ dprintk(KERN_INFO "aes_dma_init(%p)\n", ctx);
+
+ /* Allocate and map cache buffers */
+ ctx->buf_in = dma_alloc_coherent(NULL, ctx->buflen, &ctx->dma_addr_in,
+ GFP_KERNEL);
+ if (!ctx->buf_in) {
+ dprintk(KERN_ERR "SMC: Unable to alloc AES in cache buffer\n");
+ return -ENOMEM;
+ }
+
+ ctx->buf_out = dma_alloc_coherent(NULL, ctx->buflen, &ctx->dma_addr_out,
+ GFP_KERNEL);
+ if (!ctx->buf_out) {
+ dprintk(KERN_ERR "SMC: Unable to alloc AES out cache buffer\n");
+ dma_free_coherent(NULL, ctx->buflen, ctx->buf_in,
+ ctx->dma_addr_in);
+ return -ENOMEM;
+ }
+
+ /* Request DMA channels */
+ err = omap_request_dma(0, "smc-aes-rx", aes_dma_callback, ctx,
+ &ctx->dma_lch_in);
+ if (err) {
+ dprintk(KERN_ERR "SMC: Unable to request AES RX DMA channel\n");
+ goto err_dma_in;
+ }
+
+	err = omap_request_dma(0, "smc-aes-tx", aes_dma_callback,
+			ctx, &ctx->dma_lch_out);
+ if (err) {
+ dprintk(KERN_ERR "SMC: Unable to request AES TX DMA channel\n");
+ goto err_dma_out;
+ }
+
+	dprintk(KERN_INFO "aes_dma_init(%p) configured DMA channels "
+		"(RX = %d, TX = %d)\n", ctx, ctx->dma_lch_in, ctx->dma_lch_out);
+
+ return 0;
+
+err_dma_out:
+ omap_free_dma(ctx->dma_lch_in);
+err_dma_in:
+ dma_free_coherent(NULL, ctx->buflen, ctx->buf_in, ctx->dma_addr_in);
+ dma_free_coherent(NULL, ctx->buflen, ctx->buf_out, ctx->dma_addr_out);
+
+ return err;
+}
+
+static void aes_dma_cleanup(struct aes_hwa_ctx *ctx)
+{
+ omap_free_dma(ctx->dma_lch_out);
+ omap_free_dma(ctx->dma_lch_in);
+ dma_free_coherent(NULL, ctx->buflen, ctx->buf_in, ctx->dma_addr_in);
+ dma_free_coherent(NULL, ctx->buflen, ctx->buf_out, ctx->dma_addr_out);
+}
+
+static int aes_handle_req(struct aes_hwa_ctx *ctx)
+{
+ struct tf_crypto_aes_operation_state *state;
+ struct crypto_async_request *async_req, *backlog;
+ struct ablkcipher_request *req;
+ unsigned long flags;
+
+ if (ctx->total)
+ goto start;
+
+ spin_lock_irqsave(&ctx->lock, flags);
+ backlog = crypto_get_backlog(&ctx->queue);
+ async_req = crypto_dequeue_request(&ctx->queue);
+ if (!async_req)
+ clear_bit(FLAGS_BUSY, &ctx->flags);
+ spin_unlock_irqrestore(&ctx->lock, flags);
+
+ if (!async_req)
+ return 0;
+
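+	/* Notify a backlogged request that it is now being processed */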
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ req = ablkcipher_request_cast(async_req);
+
+ ctx->req = req;
+ ctx->total = req->nbytes;
+ ctx->in_offset = 0;
+ ctx->in_sg = req->src;
+ ctx->out_offset = 0;
+ ctx->out_sg = req->dst;
+
+ state = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+
+ if (!AES_CTRL_IS_MODE_ECB(state->CTRL)) {
+ u32 *ptr = (u32 *) req->info;
+
+ state->AES_IV_0 = ptr[0];
+ state->AES_IV_1 = ptr[1];
+ state->AES_IV_2 = ptr[2];
+ state->AES_IV_3 = ptr[3];
+ }
+
+start:
+ return aes_dma_start(ctx);
+}
+
+static void aes_tasklet(unsigned long data)
+{
+ struct aes_hwa_ctx *ctx = (struct aes_hwa_ctx *) data;
+
+ aes_dma_stop(ctx);
+ aes_handle_req(ctx);
+}
+
+/* Generic */
+static int aes_setkey(struct tf_crypto_aes_operation_state *state,
+ const u8 *key, unsigned int keylen)
+{
+ u32 *ptr = (u32 *)key;
+
+ switch (keylen) {
+ case 16:
+ state->CTRL |= AES_CTRL_KEY_SIZE_128;
+ break;
+ case 24:
+ state->CTRL |= AES_CTRL_KEY_SIZE_192;
+ break;
+ case 32:
+ state->CTRL |= AES_CTRL_KEY_SIZE_256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ state->KEY1_0 = ptr[0];
+ state->KEY1_1 = ptr[1];
+ state->KEY1_2 = ptr[2];
+ state->KEY1_3 = ptr[3];
+ if (keylen >= 24) {
+ state->KEY1_4 = ptr[4];
+ state->KEY1_5 = ptr[5];
+ }
+ if (keylen == 32) {
+ state->KEY1_6 = ptr[6];
+ state->KEY1_7 = ptr[7];
+ }
+
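+	/* Mark the key as public so the HWA reloads it from this context */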
+ state->key_is_public = 1;
+
+ return 0;
+}
+
+static int aes_operate(struct ablkcipher_request *req)
+{
+ unsigned long flags;
+ int err;
+
+ /* Make sure AES HWA is accessible */
+ tf_delayed_secure_resume();
+
+ spin_lock_irqsave(&aes_ctx->lock, flags);
+ err = ablkcipher_enqueue_request(&aes_ctx->queue, req);
+ spin_unlock_irqrestore(&aes_ctx->lock, flags);
+
+ if (!test_and_set_bit(FLAGS_BUSY, &aes_ctx->flags))
+ aes_handle_req(aes_ctx);
+
+ return err;
+}
+
+static int aes_encrypt(struct ablkcipher_request *req)
+{
+ struct tf_crypto_aes_operation_state *state =
+ crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+
+ state->CTRL |= AES_CTRL_DIRECTION_ENCRYPT;
+
+ return aes_operate(req);
+}
+
+static int aes_decrypt(struct ablkcipher_request *req)
+{
+ struct tf_crypto_aes_operation_state *state =
+ crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+
+ state->CTRL &= ~(AES_CTRL_DIRECTION_ENCRYPT);
+ state->CTRL |= AES_CTRL_DIRECTION_DECRYPT;
+
+ return aes_operate(req);
+}
+
+static int aes_sync_operate(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct crypto_tfm *tfm = crypto_blkcipher_tfm(desc->tfm);
+ struct tf_crypto_aes_operation_state *state = crypto_tfm_ctx(tfm);
+ struct blkcipher_walk walk;
+ int err;
+
+ if (nbytes % AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ /* Make sure AES HWA is accessible */
+ tf_delayed_secure_resume();
+
+ tf_crypto_lock_hwa(PUBLIC_CRYPTO_HWA_AES1, LOCK_HWA);
+ tf_crypto_enable_clock(PUBLIC_CRYPTO_AES1_CLOCK_REG);
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ if (!AES_CTRL_IS_MODE_ECB(state->CTRL)) {
+ u32 *ptr = (u32 *) walk.iv;
+
+ state->AES_IV_0 = ptr[0];
+ state->AES_IV_1 = ptr[1];
+ state->AES_IV_2 = ptr[2];
+ state->AES_IV_3 = ptr[3];
+ }
+
+ while ((nbytes = walk.nbytes)) {
+ if (!tf_aes_update(state, walk.src.virt.addr,
+ walk.dst.virt.addr, nbytes / AES_BLOCK_SIZE)) {
+ err = -EINVAL;
+ break;
+ }
+
+ /* tf_aes_update processes all the data */
+ nbytes = 0;
+
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+
+ if (!AES_CTRL_IS_MODE_ECB(state->CTRL)) {
+ u32 *ptr = (u32 *) walk.iv;
+
+ ptr[0] = state->AES_IV_0;
+ ptr[1] = state->AES_IV_1;
+ ptr[2] = state->AES_IV_2;
+ ptr[3] = state->AES_IV_3;
+ }
+
+ tf_crypto_disable_clock(PUBLIC_CRYPTO_AES1_CLOCK_REG);
+ tf_crypto_lock_hwa(PUBLIC_CRYPTO_HWA_AES1, UNLOCK_HWA);
+
+ return err;
+}
+
+static int aes_sync_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct crypto_tfm *tfm = crypto_blkcipher_tfm(desc->tfm);
+ struct tf_crypto_aes_operation_state *state = crypto_tfm_ctx(tfm);
+
+ state->CTRL |= AES_CTRL_DIRECTION_ENCRYPT;
+
+ dprintk(KERN_INFO "aes_sync_encrypt nbytes=0x%x\n", nbytes);
+
+ return aes_sync_operate(desc, dst, src, nbytes);
+}
+
+static int aes_sync_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct crypto_tfm *tfm = crypto_blkcipher_tfm(desc->tfm);
+ struct tf_crypto_aes_operation_state *state = crypto_tfm_ctx(tfm);
+
+ state->CTRL &= ~(AES_CTRL_DIRECTION_ENCRYPT);
+ state->CTRL |= AES_CTRL_DIRECTION_DECRYPT;
+
+ dprintk(KERN_INFO "aes_sync_decrypt\n");
+
+ return aes_sync_operate(desc, dst, src, nbytes);
+}
+
+/* AES ECB */
+static int aes_ecb_sync_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct tf_crypto_aes_operation_state *state = crypto_tfm_ctx(tfm);
+
+ state->CTRL = AES_CTRL_MODE_ECB_BIT;
+
+ dprintk(KERN_INFO "aes_ecb_sync_setkey\n");
+
+ return aes_setkey(state, key, keylen);
+}
+
+static int aes_ecb_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct tf_crypto_aes_operation_state *state =
+ crypto_ablkcipher_ctx(tfm);
+
+ state->CTRL = AES_CTRL_MODE_ECB_BIT;
+
+ return aes_setkey(state, key, keylen);
+}
+
+/* AES CBC */
+static int aes_cbc_sync_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct tf_crypto_aes_operation_state *state = crypto_tfm_ctx(tfm);
+
+ state->CTRL = AES_CTRL_MODE_CBC_BIT;
+
+ dprintk(KERN_INFO "aes_cbc_sync_setkey\n");
+
+ return aes_setkey(state, key, keylen);
+}
+
+static int aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct tf_crypto_aes_operation_state *state =
+ crypto_ablkcipher_ctx(tfm);
+
+ state->CTRL = AES_CTRL_MODE_CBC_BIT;
+
+ return aes_setkey(state, key, keylen);
+}
+
+/* AES CTR */
+static int aes_ctr_sync_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct tf_crypto_aes_operation_state *state = crypto_tfm_ctx(tfm);
+
+ state->CTRL = AES_CTRL_MODE_CTR_BIT;
+
+	dprintk(KERN_INFO "aes_ctr_sync_setkey\n");
+
+ return aes_setkey(state, key, keylen);
+}
+
+static int aes_ctr_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct tf_crypto_aes_operation_state *state =
+ crypto_ablkcipher_ctx(tfm);
+
+ /* Always defaults to 128-bit counter */
+ state->CTRL = AES_CTRL_MODE_CTR_BIT | AES_CTRL_CTR_WIDTH_128;
+
+ return aes_setkey(state, key, keylen);
+}
+
+static struct crypto_alg smc_aes_ecb_sync_alg = {
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_priority = 999,
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "aes-ecb-smc",
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize =
+ sizeof(struct tf_crypto_aes_operation_state),
+ .cra_alignmask = 3,
+ .cra_list = LIST_HEAD_INIT(smc_aes_ecb_sync_alg.cra_list),
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_ecb_sync_setkey,
+ .encrypt = aes_sync_encrypt,
+ .decrypt = aes_sync_decrypt,
+ }
+ },
+};
+
+static struct crypto_alg smc_aes_cbc_sync_alg = {
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_priority = 999,
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "aes-cbc-smc",
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize =
+ sizeof(struct tf_crypto_aes_operation_state),
+ .cra_alignmask = 3,
+ .cra_list = LIST_HEAD_INIT(smc_aes_cbc_sync_alg.cra_list),
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = PUBLIC_CRYPTO_IV_MAX_SIZE,
+ .setkey = aes_cbc_sync_setkey,
+ .encrypt = aes_sync_encrypt,
+ .decrypt = aes_sync_decrypt,
+ }
+ },
+};
+
+static struct crypto_alg smc_aes_ctr_sync_alg = {
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
+ .cra_priority = 999,
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "aes-ctr-smc",
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize =
+ sizeof(struct tf_crypto_aes_operation_state),
+ .cra_alignmask = 3,
+ .cra_list = LIST_HEAD_INIT(smc_aes_ctr_sync_alg.cra_list),
+ .cra_u = {
+ .blkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = PUBLIC_CRYPTO_IV_MAX_SIZE,
+ .setkey = aes_ctr_sync_setkey,
+ .encrypt = aes_sync_encrypt,
+ .decrypt = aes_sync_decrypt,
+ }
+ },
+};
+
+static struct crypto_alg smc_aes_ecb_alg = {
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_priority = 999,
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "aes-ecb-smc",
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize =
+ sizeof(struct tf_crypto_aes_operation_state),
+ .cra_alignmask = 3,
+ .cra_list = LIST_HEAD_INIT(smc_aes_ecb_alg.cra_list),
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_ecb_setkey,
+ .encrypt = aes_encrypt,
+ .decrypt = aes_decrypt,
+ }
+ },
+};
+
+static struct crypto_alg smc_aes_cbc_alg = {
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_priority = 999,
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "aes-cbc-smc",
+ .cra_module = THIS_MODULE,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize =
+ sizeof(struct tf_crypto_aes_operation_state),
+ .cra_alignmask = 3,
+ .cra_list = LIST_HEAD_INIT(smc_aes_cbc_alg.cra_list),
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = PUBLIC_CRYPTO_IV_MAX_SIZE,
+ .setkey = aes_cbc_setkey,
+ .encrypt = aes_encrypt,
+ .decrypt = aes_decrypt,
+ }
+ },
+};
+
+static struct crypto_alg smc_aes_ctr_alg = {
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_priority = 999,
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "aes-ctr-smc",
+ .cra_module = THIS_MODULE,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize =
+ sizeof(struct tf_crypto_aes_operation_state),
+ .cra_alignmask = 3,
+ .cra_list = LIST_HEAD_INIT(smc_aes_ctr_alg.cra_list),
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = PUBLIC_CRYPTO_IV_MAX_SIZE,
+ .setkey = aes_ctr_setkey,
+ .encrypt = aes_encrypt,
+ .decrypt = aes_decrypt,
+ }
+ },
+};
+
+
+int register_smc_public_crypto_aes(void)
+{
+ int ret;
+
+ aes_ctx = kzalloc(sizeof(struct aes_hwa_ctx), GFP_KERNEL);
+ if (aes_ctx == NULL)
+ return -ENOMEM;
+
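+	/* Queue depth of 1: further requests go on the crypto backlog */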
+ crypto_init_queue(&aes_ctx->queue, 1);
+ tasklet_init(&aes_ctx->task, aes_tasklet, (unsigned long)aes_ctx);
+ spin_lock_init(&aes_ctx->lock);
+
+ aes_ctx->dma_in = OMAP44XX_DMA_AES1_P_DATA_IN_REQ;
+ aes_ctx->dma_out = OMAP44XX_DMA_AES1_P_DATA_OUT_REQ;
+
+ ret = aes_dma_init(aes_ctx);
+ if (ret)
+ goto err_dma;
+
+ ret = crypto_register_alg(&smc_aes_ecb_sync_alg);
+ if (ret)
+ goto err_ecb_sync;
+
+ ret = crypto_register_alg(&smc_aes_cbc_sync_alg);
+ if (ret)
+ goto err_cbc_sync;
+
+ ret = crypto_register_alg(&smc_aes_ctr_sync_alg);
+ if (ret)
+ goto err_ctr_sync;
+
+ ret = crypto_register_alg(&smc_aes_ecb_alg);
+ if (ret)
+ goto err_ecb;
+
+ ret = crypto_register_alg(&smc_aes_cbc_alg);
+ if (ret)
+ goto err_cbc;
+
+ ret = crypto_register_alg(&smc_aes_ctr_alg);
+ if (ret)
+ goto err_ctr;
+
+ return 0;
+
+err_ctr:
+ crypto_unregister_alg(&smc_aes_cbc_alg);
+err_cbc:
+ crypto_unregister_alg(&smc_aes_ecb_alg);
+err_ecb:
+ crypto_unregister_alg(&smc_aes_ctr_sync_alg);
+err_ctr_sync:
+ crypto_unregister_alg(&smc_aes_cbc_sync_alg);
+err_cbc_sync:
+ crypto_unregister_alg(&smc_aes_ecb_sync_alg);
+err_ecb_sync:
+ aes_dma_cleanup(aes_ctx);
+err_dma:
+ tasklet_kill(&aes_ctx->task);
+ kfree(aes_ctx);
+ return ret;
+}
+
+void unregister_smc_public_crypto_aes(void)
+{
+ if (aes_ctx == NULL)
+ return;
+
+ crypto_unregister_alg(&smc_aes_ecb_sync_alg);
+ crypto_unregister_alg(&smc_aes_cbc_sync_alg);
+ crypto_unregister_alg(&smc_aes_ctr_sync_alg);
+
+ crypto_unregister_alg(&smc_aes_ecb_alg);
+ crypto_unregister_alg(&smc_aes_cbc_alg);
+ crypto_unregister_alg(&smc_aes_ctr_alg);
+
+ aes_dma_cleanup(aes_ctx);
+
+ tasklet_kill(&aes_ctx->task);
+ kfree(aes_ctx);
+}
+#endif
diff --git a/security/smc/tf_crypto_des.c b/security/smc/tf_crypto_des.c
new file mode 100644
index 0000000..716a60f
--- /dev/null
+++ b/security/smc/tf_crypto_des.c
@@ -0,0 +1,404 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include "tf_defs.h"
+#include "tf_util.h"
+#include "tf_crypto.h"
+#include "tf_dma.h"
+
+#include <linux/io.h>
+#include <mach/io.h>
+
+/*
+ * DES Hardware Accelerator: Base address
+ */
+#define DES_REGS_HW_ADDR 0x480A5000
+
+/*
+ * CTRL register Masks
+ */
+#define DES_CTRL_OUTPUT_READY_BIT (1<<0)
+#define DES_CTRL_INPUT_READY_BIT (1<<1)
+
+#define DES_CTRL_GET_DIRECTION(x) ((x) & 4)
+#define DES_CTRL_DIRECTION_DECRYPT 0
+#define DES_CTRL_DIRECTION_ENCRYPT (1<<2)
+
+#define DES_CTRL_GET_TDES(x) ((x) & 8)
+#define DES_CTRL_TDES_DES 0
+#define DES_CTRL_TDES_TRIPLE_DES (1<<3)
+
+#define DES_CTRL_GET_MODE(x) ((x) & 0x10)
+#define DES_CTRL_MODE_ECB 0
+#define DES_CTRL_MODE_CBC (1<<4)
+
+/*
+ * SYSCONFIG register masks
+ */
+#define DES_SYSCONFIG_DMA_REQ_IN_EN_BIT (1<<5)
+#define DES_SYSCONFIG_DMA_REQ_OUT_EN_BIT (1<<6)
+
+/*------------------------------------------------------------------------*/
+/* DES/DES3 Context */
+/*------------------------------------------------------------------------*/
+/**
+ * This structure contains the registers of the DES HW accelerator.
+ */
+struct des3_des_reg {
+ u32 DES_KEY3_L; /* DES Key 3 Low Register */
+ u32 DES_KEY3_H; /* DES Key 3 High Register */
+ u32 DES_KEY2_L; /* DES Key 2 Low Register */
+ u32 DES_KEY2_H; /* DES Key 2 High Register */
+ u32 DES_KEY1_L; /* DES Key 1 Low Register */
+ u32 DES_KEY1_H; /* DES Key 1 High Register */
+ u32 DES_IV_L; /* DES Initialization Vector Low Reg */
+ u32 DES_IV_H; /* DES Initialization Vector High Reg */
+ u32 DES_CTRL; /* DES Control Register */
+ u32 DES_LENGTH; /* DES Length Register */
+ u32 DES_DATA_L; /* DES Data Input/Output Low Register */
+ u32 DES_DATA_H; /* DES Data Input/Output High Register */
+ u32 DES_REV; /* DES Revision Register */
+ u32 DES_SYSCONFIG; /* DES Mask and Reset Register */
+ u32 DES_SYSSTATUS; /* DES System Status Register */
+};
+
+static struct des3_des_reg *des_reg;
+
+/*------------------------------------------------------------------------
+ *Forward declarations
+ *------------------------------------------------------------------------ */
+
+static bool tf_des_update_dma(u8 *src, u8 *dest, u32 nb_blocks);
+
+/*-------------------------------------------------------------------------
+ *Save HWA registers into the specified operation state structure
+ *-------------------------------------------------------------------------*/
+static void tf_des_save_registers(u32 DES_CTRL,
+ struct tf_crypto_des_operation_state *des_state)
+{
+ dprintk(KERN_INFO
+ "tf_des_save_registers in des_state=%p CTRL=0x%08x\n",
+ des_state, DES_CTRL);
+
+ /*Save the IV if we are in CBC mode */
+ if (DES_CTRL_GET_MODE(DES_CTRL) == DES_CTRL_MODE_CBC) {
+ des_state->DES_IV_L = INREG32(&des_reg->DES_IV_L);
+ des_state->DES_IV_H = INREG32(&des_reg->DES_IV_H);
+ }
+}
+
+/*-------------------------------------------------------------------------
+ *Restore the HWA registers from the operation state structure
+ *-------------------------------------------------------------------------*/
+static void tf_des_restore_registers(u32 DES_CTRL,
+ struct tf_crypto_des_operation_state *des_state)
+{
+ dprintk(KERN_INFO "tf_des_restore_registers from "
+ "des_state=%p CTRL=0x%08x\n",
+ des_state, DES_CTRL);
+
+ /*Write the IV ctx->reg */
+ if (DES_CTRL_GET_MODE(DES_CTRL) == DES_CTRL_MODE_CBC) {
+ OUTREG32(&des_reg->DES_IV_L, des_state->DES_IV_L);
+ OUTREG32(&des_reg->DES_IV_H, des_state->DES_IV_H);
+ }
+
+	/* Set the DIRECTION and CBC bits in the CTRL register.
+	 * Keep the TDES bit from the accelerator */
+ OUTREG32(&des_reg->DES_CTRL,
+ (INREG32(&des_reg->DES_CTRL) & (1 << 3)) |
+ (DES_CTRL & ((1 << 2) | (1 << 4))));
+
+ /*Set the SYSCONFIG register to 0 */
+ OUTREG32(&des_reg->DES_SYSCONFIG, 0);
+}
+
+/*------------------------------------------------------------------------- */
+
+void tf_des_init(void)
+{
+ des_reg = omap_ioremap(DES_REGS_HW_ADDR, SZ_1M, MT_DEVICE);
+ if (des_reg == NULL)
+ panic("Unable to remap DES/3DES module");
+}
+
+void tf_des_exit(void)
+{
+ omap_iounmap(des_reg);
+}
+
+bool tf_des_update(u32 DES_CTRL,
+ struct tf_crypto_des_operation_state *des_state,
+ u8 *src, u8 *dest, u32 nb_blocks)
+{
+ u32 nbr_of_blocks;
+ u32 temp;
+ u8 *process_src;
+ u8 *process_dest;
+ u32 dma_use = PUBLIC_CRYPTO_DMA_USE_NONE;
+
+ /*
+ *Choice of the processing type
+ */
+ if (nb_blocks * DES_BLOCK_SIZE >= DMA_TRIGGER_IRQ_DES)
+ dma_use = PUBLIC_CRYPTO_DMA_USE_IRQ;
+
+ dprintk(KERN_INFO "tf_des_update: "
+ "src=0x%08x, dest=0x%08x, nb_blocks=0x%08x, dma_use=0x%08x\n",
+ (unsigned int)src, (unsigned int)dest,
+ (unsigned int)nb_blocks, (unsigned int)dma_use);
+
+ if (nb_blocks == 0) {
+ dprintk(KERN_INFO "tf_des_update: Nothing to process\n");
+ return true;
+ }
+
+ if (DES_CTRL_GET_DIRECTION(INREG32(&des_reg->DES_CTRL)) !=
+ DES_CTRL_GET_DIRECTION(DES_CTRL)) {
+ dprintk(KERN_WARNING "HWA configured for another direction\n");
+ return false;
+ }
+
+ /*Restore the registers of the accelerator from the operation state */
+ tf_des_restore_registers(DES_CTRL, des_state);
+
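+	/* Program the total byte count before feeding data blocks */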
+ OUTREG32(&des_reg->DES_LENGTH, nb_blocks * DES_BLOCK_SIZE);
+
+ if (dma_use == PUBLIC_CRYPTO_DMA_USE_IRQ) {
+
+ /*perform the update with DMA */
+ if (!tf_des_update_dma(src, dest, nb_blocks))
+ return false;
+
+ } else {
+ u8 buf[DMA_TRIGGER_IRQ_DES];
+
+ process_src = process_dest = buf;
+
+ if (copy_from_user(buf, src, nb_blocks * DES_BLOCK_SIZE))
+ return false;
+
+ for (nbr_of_blocks = 0;
+ nbr_of_blocks < nb_blocks; nbr_of_blocks++) {
+
+			/*
+			 * Wait for the input to be ready; crash the system
+			 * if this takes too long, as it should never happen
+			 */
+ if (tf_crypto_wait_for_ready_bit(
+ (u32 *)&des_reg->DES_CTRL,
+ DES_CTRL_INPUT_READY_BIT) !=
+ PUBLIC_CRYPTO_OPERATION_SUCCESS) {
+ panic("Wait too long for DES HW "
+ "accelerator Input data to be ready\n");
+ }
+
+ /*We copy the 8 bytes of data src->reg */
+ temp = (u32) BYTES_TO_LONG(process_src);
+ OUTREG32(&des_reg->DES_DATA_L, temp);
+ process_src += 4;
+ temp = (u32) BYTES_TO_LONG(process_src);
+ OUTREG32(&des_reg->DES_DATA_H, temp);
+ process_src += 4;
+
+ /*We wait for the output ready */
+ tf_crypto_wait_for_ready_bit_infinitely(
+ (u32 *)&des_reg->DES_CTRL,
+ DES_CTRL_OUTPUT_READY_BIT);
+
+ /*We copy the 8 bytes of data reg->dest */
+ temp = INREG32(&des_reg->DES_DATA_L);
+ LONG_TO_BYTE(process_dest, temp);
+ process_dest += 4;
+ temp = INREG32(&des_reg->DES_DATA_H);
+ LONG_TO_BYTE(process_dest, temp);
+ process_dest += 4;
+ }
+
+ if (copy_to_user(dest, buf, nb_blocks * DES_BLOCK_SIZE))
+ return false;
+ }
+
+ /*Save the accelerator registers into the operation state */
+ tf_des_save_registers(DES_CTRL, des_state);
+
+ dprintk(KERN_INFO "tf_des_update: Done\n");
+ return true;
+}
+
+/*------------------------------------------------------------------------- */
+/*
+ * Static function, performs DES encryption/decryption using DMA for the
+ * data transfer.
+ *
+ * inputs: src : pointer to the input data to process
+ *         nb_blocks : number of blocks to process
+ * output: dest : pointer to the output data (can be equal to src)
+ */
+static bool tf_des_update_dma(u8 *src, u8 *dest, u32 nb_blocks)
+{
+ /*
+ *Note: The DMA only sees physical addresses !
+ */
+
+ int dma_ch0;
+ int dma_ch1;
+ struct omap_dma_channel_params ch0_parameters;
+ struct omap_dma_channel_params ch1_parameters;
+ u32 length = nb_blocks * DES_BLOCK_SIZE;
+ u32 length_loop = 0;
+ u32 nb_blocksLoop = 0;
+ struct tf_device *dev = tf_get_device();
+
+ dprintk(KERN_INFO
+ "tf_des_update_dma: In=0x%08x, Out=0x%08x, Len=%u\n",
+ (unsigned int)src, (unsigned int)dest,
+ (unsigned int)length);
+
+ /*lock the DMA */
+ mutex_lock(&dev->sm.dma_mutex);
+
+ if (tf_dma_request(&dma_ch0) != PUBLIC_CRYPTO_OPERATION_SUCCESS) {
+ mutex_unlock(&dev->sm.dma_mutex);
+ return false;
+ }
+ if (tf_dma_request(&dma_ch1) != PUBLIC_CRYPTO_OPERATION_SUCCESS) {
+ omap_free_dma(dma_ch0);
+ mutex_unlock(&dev->sm.dma_mutex);
+ return false;
+ }
+
+ while (length > 0) {
+
+		/*
+		 * At this time, we are sure that the DMA channels are
+		 * available and not used by another public crypto operation
+		 */
+
+ /*DMA used for Input and Output */
+ OUTREG32(&des_reg->DES_SYSCONFIG,
+ INREG32(&des_reg->DES_SYSCONFIG)
+ | DES_SYSCONFIG_DMA_REQ_OUT_EN_BIT
+ | DES_SYSCONFIG_DMA_REQ_IN_EN_BIT);
+
+ /* Check length */
+ if (length <= dev->dma_buffer_length)
+ length_loop = length;
+ else
+ length_loop = dev->dma_buffer_length;
+
+ /* The length is always a multiple of the block size */
+ nb_blocksLoop = length_loop / DES_BLOCK_SIZE;
+
+		/*
+		 * Copy the data from the user input buffer into a
+		 * preallocated buffer which has the correct properties for
+		 * efficient DMA transfers.
+		 */
+ if (copy_from_user(dev->dma_buffer, src, length_loop)) {
+ omap_free_dma(dma_ch0);
+ omap_free_dma(dma_ch1);
+ mutex_unlock(&dev->sm.dma_mutex);
+ return false;
+ }
+
+ /* DMA1: Mem -> DES */
+ tf_dma_set_channel_common_params(&ch0_parameters,
+ nb_blocksLoop,
+ DMA_CEN_Elts_per_Frame_DES,
+ DES_REGS_HW_ADDR + 0x28,
+ dev->dma_buffer_phys,
+ OMAP44XX_DMA_DES_P_DATA_IN_REQ);
+
+ ch0_parameters.src_amode = OMAP_DMA_AMODE_POST_INC;
+ ch0_parameters.dst_amode = OMAP_DMA_AMODE_CONSTANT;
+ ch0_parameters.src_or_dst_synch = OMAP_DMA_DST_SYNC;
+
+ dprintk(KERN_INFO
+ "tf_des_update_dma: omap_set_dma_params(ch0)\n");
+ omap_set_dma_params(dma_ch0, &ch0_parameters);
+
+ /* DMA2: DES -> Mem */
+ tf_dma_set_channel_common_params(&ch1_parameters,
+ nb_blocksLoop,
+ DMA_CEN_Elts_per_Frame_DES,
+ dev->dma_buffer_phys,
+ DES_REGS_HW_ADDR + 0x28,
+ OMAP44XX_DMA_DES_P_DATA_OUT_REQ);
+
+ ch1_parameters.src_amode = OMAP_DMA_AMODE_CONSTANT;
+ ch1_parameters.dst_amode = OMAP_DMA_AMODE_POST_INC;
+ ch1_parameters.src_or_dst_synch = OMAP_DMA_SRC_SYNC;
+
+ dprintk(KERN_INFO "tf_des_update_dma: "
+ "omap_set_dma_params(ch1)\n");
+ omap_set_dma_params(dma_ch1, &ch1_parameters);
+
+ wmb();
+
+ dprintk(KERN_INFO
+ "tf_des_update_dma: Start DMA channel %d\n",
+ (unsigned int)dma_ch0);
+ tf_dma_start(dma_ch0, OMAP_DMA_BLOCK_IRQ);
+
+ dprintk(KERN_INFO
+ "tf_des_update_dma: Start DMA channel %d\n",
+ (unsigned int)dma_ch1);
+ tf_dma_start(dma_ch1, OMAP_DMA_BLOCK_IRQ);
+ tf_dma_wait(2);
+
+ /* Unset DMA synchronisation requests */
+ OUTREG32(&des_reg->DES_SYSCONFIG,
+ INREG32(&des_reg->DES_SYSCONFIG)
+ & (~DES_SYSCONFIG_DMA_REQ_OUT_EN_BIT)
+ & (~DES_SYSCONFIG_DMA_REQ_IN_EN_BIT));
+
+ omap_clear_dma(dma_ch0);
+ omap_clear_dma(dma_ch1);
+
+		/*
+		 * The DMA transfer is complete.
+		 *
+		 * The DMA output is in the preallocated aligned buffer
+		 * and needs to be copied to the output buffer.
+		 */
+ if (copy_to_user(dest, dev->dma_buffer, length_loop)) {
+ omap_free_dma(dma_ch0);
+ omap_free_dma(dma_ch1);
+ mutex_unlock(&dev->sm.dma_mutex);
+ return false;
+ }
+
+ src += length_loop;
+ dest += length_loop;
+ length -= length_loop;
+ }
+
+ /* For safety reasons, let's clean the working buffer */
+ memset(dev->dma_buffer, 0, length_loop);
+
+ /* Release the DMA */
+ omap_free_dma(dma_ch0);
+ omap_free_dma(dma_ch1);
+
+ mutex_unlock(&dev->sm.dma_mutex);
+
+ dprintk(KERN_INFO "tf_des_update_dma: Success\n");
+
+ return true;
+}
diff --git a/security/smc/tf_crypto_digest.c b/security/smc/tf_crypto_digest.c
new file mode 100644
index 0000000..d69a97f
--- /dev/null
+++ b/security/smc/tf_crypto_digest.c
@@ -0,0 +1,992 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include "tf_defs.h"
+#include "tf_util.h"
+#include "tf_crypto.h"
+#include "tf_dma.h"
+#include "tf_zebra.h"
+
+#include <linux/io.h>
+#include <mach/io.h>
+#include <linux/crypto.h>
+#include <crypto/internal/hash.h>
+
+/*
+ * SHA2/MD5 Hardware Accelerator: Base address for SHA2/MD5 HIB2
+ * This is referenced as the SHA2MD5 module in the Crypto TRM
+ */
+#define DIGEST1_REGS_HW_ADDR 0x4B101000
+
+/*
+ * IRQSTATUS register Masks
+ */
+#define DIGEST_IRQSTATUS_OUTPUT_READY_BIT (1 << 0)
+#define DIGEST_IRQSTATUS_INPUT_READY_BIT (1 << 1)
+#define DIGEST_IRQSTATUS_PARTHASH_READY_BIT (1 << 2)
+#define DIGEST_IRQSTATUS_CONTEXT_READY_BIT (1 << 3)
+
+/*
+ * MODE register Masks
+ */
+#define DIGEST_MODE_GET_ALGO(x) ((x & 0x6) >> 1)
+#define DIGEST_MODE_SET_ALGO(x, a) ((a << 1) | (x & 0xFFFFFFF9))
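+/*
+ * The 2-bit algorithm identifier lives in MODE[2:1]; the CTRL word kept in
+ * the operation state stores it at the same position, which is why MODE is
+ * programmed with "algo & 0x6" below
+ */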
+
+#define DIGEST_MODE_ALGO_CONST_BIT (1 << 3)
+#define DIGEST_MODE_CLOSE_HASH_BIT (1 << 4)
+
+/*
+ * SYSCONFIG register masks
+ */
+#define DIGEST_SYSCONFIG_PIT_EN_BIT (1 << 2)
+#define DIGEST_SYSCONFIG_PDMA_EN_BIT (1 << 3)
+#define DIGEST_SYSCONFIG_PCONT_SWT_BIT (1 << 6)
+#define DIGEST_SYSCONFIG_PADVANCED_BIT (1 << 7)
+
+/*-------------------------------------------------------------------------*/
+/* Digest Context */
+/*-------------------------------------------------------------------------*/
+/**
+ * This structure contains the registers of the SHA1/MD5 HW accelerator.
+ */
+struct sha1_md5_reg {
+ u32 ODIGEST_A; /* 0x00 Outer Digest A */
+ u32 ODIGEST_B; /* 0x04 Outer Digest B */
+ u32 ODIGEST_C; /* 0x08 Outer Digest C */
+ u32 ODIGEST_D; /* 0x0C Outer Digest D */
+ u32 ODIGEST_E; /* 0x10 Outer Digest E */
+ u32 ODIGEST_F; /* 0x14 Outer Digest F */
+ u32 ODIGEST_G; /* 0x18 Outer Digest G */
+ u32 ODIGEST_H; /* 0x1C Outer Digest H */
+ u32 IDIGEST_A; /* 0x20 Inner Digest A */
+ u32 IDIGEST_B; /* 0x24 Inner Digest B */
+ u32 IDIGEST_C; /* 0x28 Inner Digest C */
+ u32 IDIGEST_D; /* 0x2C Inner Digest D */
+ u32 IDIGEST_E; /* 0x30 Inner Digest E */
+ u32 IDIGEST_F; /* 0x34 Inner Digest F */
+ u32 IDIGEST_G; /* 0x38 Inner Digest G */
+ u32 IDIGEST_H; /* 0x3C Inner Digest H */
+ u32 DIGEST_COUNT; /* 0x40 Digest count */
+ u32 MODE; /* 0x44 Digest mode */
+ u32 LENGTH; /* 0x48 Data length */
+
+ u32 reserved0[13];
+
+ u32 DIN_0; /* 0x80 Data 0 */
+ u32 DIN_1; /* 0x84 Data 1 */
+ u32 DIN_2; /* 0x88 Data 2 */
+ u32 DIN_3; /* 0x8C Data 3 */
+ u32 DIN_4; /* 0x90 Data 4 */
+ u32 DIN_5; /* 0x94 Data 5 */
+ u32 DIN_6; /* 0x98 Data 6 */
+ u32 DIN_7; /* 0x9C Data 7 */
+ u32 DIN_8; /* 0xA0 Data 8 */
+ u32 DIN_9; /* 0xA4 Data 9 */
+ u32 DIN_10; /* 0xA8 Data 10 */
+ u32 DIN_11; /* 0xAC Data 11 */
+ u32 DIN_12; /* 0xB0 Data 12 */
+ u32 DIN_13; /* 0xB4 Data 13 */
+ u32 DIN_14; /* 0xB8 Data 14 */
+ u32 DIN_15; /* 0xBC Data 15 */
+
+ u32 reserved1[16];
+
+ u32 REVISION; /* 0x100 Revision */
+
+ u32 reserved2[3];
+
+ u32 SYSCONFIG; /* 0x110 Config */
+ u32 SYSSTATUS; /* 0x114 Status */
+ u32 IRQSTATUS; /* 0x118 IRQ Status */
+ u32 IRQENABLE; /* 0x11C IRQ Enable */
+};
+
+static struct sha1_md5_reg *sha1_md5_reg;
+
+static const u8 md5OverEmptyString[] = {
+ 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
+ 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e
+};
+
+static const u8 sha1OverEmptyString[] = {
+ 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
+ 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
+ 0xaf, 0xd8, 0x07, 0x09
+};
+
+static const u8 sha224OverEmptyString[] = {
+ 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9,
+ 0x47, 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4,
+ 0x15, 0xa2, 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a,
+ 0xc5, 0xb3, 0xe4, 0x2f
+};
+
+static const u8 sha256OverEmptyString[] = {
+ 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
+ 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
+ 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
+ 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
+};
+
+/*------------------------------------------------------------------------
+ *Forward declarations
+ *------------------------------------------------------------------------- */
+
+static void tf_digest_hw_perform_64b(u32 *data,
+ u32 algo, u32 bytes_processed);
+static bool tf_digest_hw_perform_dma(u8 *data, u32 nDataLength,
+ u32 algo, u32 bytes_processed);
+
+static bool tf_digest_update_dma(
+ struct tf_crypto_sha_operation_state *sha_state,
+ u8 *data, u32 data_length);
+
+
+/*-------------------------------------------------------------------------
+ *Save HWA registers into the specified operation state structure
+ *------------------------------------------------------------------------*/
+static void tf_digest_save_registers(
+ struct tf_crypto_sha_operation_state *sha_state)
+{
+ dprintk(KERN_INFO "tf_digest_save_registers: State=%p\n",
+ sha_state);
+
+ sha_state->SHA_DIGEST_A = INREG32(&sha1_md5_reg->IDIGEST_A);
+ sha_state->SHA_DIGEST_B = INREG32(&sha1_md5_reg->IDIGEST_B);
+ sha_state->SHA_DIGEST_C = INREG32(&sha1_md5_reg->IDIGEST_C);
+ sha_state->SHA_DIGEST_D = INREG32(&sha1_md5_reg->IDIGEST_D);
+ sha_state->SHA_DIGEST_E = INREG32(&sha1_md5_reg->IDIGEST_E);
+ sha_state->SHA_DIGEST_F = INREG32(&sha1_md5_reg->IDIGEST_F);
+ sha_state->SHA_DIGEST_G = INREG32(&sha1_md5_reg->IDIGEST_G);
+ sha_state->SHA_DIGEST_H = INREG32(&sha1_md5_reg->IDIGEST_H);
+}
+
+/*-------------------------------------------------------------------------
+ *Restore the HWA registers from the operation state structure
+ *-------------------------------------------------------------------------*/
+static void tf_digest_restore_registers(
+ struct tf_crypto_sha_operation_state *sha_state)
+{
+ dprintk(KERN_INFO "tf_digest_restore_registers: State=%p\n",
+ sha_state);
+
+ if (sha_state->bytes_processed != 0) {
+ /*
+ * Some bytes were already processed. Initialize
+ * previous digest
+ */
+ OUTREG32(&sha1_md5_reg->IDIGEST_A, sha_state->SHA_DIGEST_A);
+ OUTREG32(&sha1_md5_reg->IDIGEST_B, sha_state->SHA_DIGEST_B);
+ OUTREG32(&sha1_md5_reg->IDIGEST_C, sha_state->SHA_DIGEST_C);
+ OUTREG32(&sha1_md5_reg->IDIGEST_D, sha_state->SHA_DIGEST_D);
+ OUTREG32(&sha1_md5_reg->IDIGEST_E, sha_state->SHA_DIGEST_E);
+ OUTREG32(&sha1_md5_reg->IDIGEST_F, sha_state->SHA_DIGEST_F);
+ OUTREG32(&sha1_md5_reg->IDIGEST_G, sha_state->SHA_DIGEST_G);
+ OUTREG32(&sha1_md5_reg->IDIGEST_H, sha_state->SHA_DIGEST_H);
+ }
+
+ OUTREG32(&sha1_md5_reg->SYSCONFIG, 0);
+}
+
+/*------------------------------------------------------------------------- */
+
+void tf_digest_init(void)
+{
+ sha1_md5_reg = omap_ioremap(DIGEST1_REGS_HW_ADDR, SZ_1M, MT_DEVICE);
+ if (sha1_md5_reg == NULL)
+ panic("Unable to remap SHA2/MD5 module");
+}
+
+void tf_digest_exit(void)
+{
+ omap_iounmap(sha1_md5_reg);
+}
+
+bool tf_digest_update(struct tf_crypto_sha_operation_state *sha_state,
+ u8 *data, u32 data_length)
+{
+ u32 dma_use = PUBLIC_CRYPTO_DMA_USE_NONE;
+
+ /*
+ *Choice of the processing type
+ */
+ if (data_length >= DMA_TRIGGER_IRQ_DIGEST)
+ dma_use = PUBLIC_CRYPTO_DMA_USE_IRQ;
+
+	dprintk(KERN_INFO "tf_digest_update: "\
+ "Data=0x%08x/%u, Chunk=%u, Processed=%u, dma_use=0x%08x\n",
+ (u32)data, (u32)data_length,
+ sha_state->chunk_length, sha_state->bytes_processed,
+ dma_use);
+
+ if (data_length == 0) {
+ dprintk(KERN_INFO "tf_digest_update: "\
+ "Nothing to process\n");
+ return true;
+ }
+
+ if (dma_use != PUBLIC_CRYPTO_DMA_USE_NONE) {
+ /*
+ * Restore the registers of the accelerator from the operation
+ * state
+ */
+ tf_digest_restore_registers(sha_state);
+
+ /*perform the updates with DMA */
+ if (!tf_digest_update_dma(sha_state, data, data_length))
+ return false;
+
+ /* Save the accelerator registers into the operation state */
+ tf_digest_save_registers(sha_state);
+ } else {
+ /*Non-DMA transfer */
+
+		/*
+		 * (1) We take the chunk buffer, which contains the last saved
+		 * data that could not be processed yet because there was not
+		 * enough of it to make a 64B buffer. Then we try to make a
+		 * 64B buffer by concatenating it with the newly passed data
+		 */
+
+		/*Is there any data in the chunk? If so, is it possible to
+		 *make a 64B buffer with the newly passed data? */
+ if ((sha_state->chunk_length != 0)
+ && (sha_state->chunk_length + data_length >=
+ HASH_BLOCK_BYTES_LENGTH)) {
+
+ u8 vLengthToComplete =
+ HASH_BLOCK_BYTES_LENGTH - sha_state->chunk_length;
+
+			/*So we fill the chunk buffer with the new data to
+			 *complete it to 64B */
+ if (copy_from_user(
+ sha_state->chunk_buffer+sha_state->chunk_length,
+ data,
+ vLengthToComplete))
+ return false;
+
+ if (sha_state->chunk_length + data_length ==
+ HASH_BLOCK_BYTES_LENGTH) {
+ /*We'll keep some data for the final */
+ sha_state->chunk_length =
+ HASH_BLOCK_BYTES_LENGTH;
+ dprintk(KERN_INFO "tf_digest_update: "\
+ "Done: Chunk=%u; Processed=%u\n",
+ sha_state->chunk_length,
+ sha_state->bytes_processed);
+ return true;
+ }
+
+ /*
+ * Restore the registers of the accelerator from the
+ * operation state
+ */
+ tf_digest_restore_registers(sha_state);
+
+ /*Then we send this buffer to the HWA */
+ tf_digest_hw_perform_64b(
+ (u32 *)sha_state->chunk_buffer, sha_state->CTRL,
+ sha_state->bytes_processed);
+
+ /*
+ * Save the accelerator registers into the operation
+ * state
+ */
+ tf_digest_save_registers(sha_state);
+
+ sha_state->bytes_processed =
+ INREG32(&sha1_md5_reg->DIGEST_COUNT);
+
+ /*We have flushed the chunk so it is empty now */
+ sha_state->chunk_length = 0;
+
+ /*Then we have less data to process */
+ data += vLengthToComplete;
+ data_length -= vLengthToComplete;
+ }
+
+		/*(2) We process all the 64B buffers that we can */
+ if (sha_state->chunk_length + data_length >=
+ HASH_BLOCK_BYTES_LENGTH) {
+
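+			/*
+			 * Note the strict inequality: when exactly one block
+			 * remains, it is kept in the chunk buffer so the
+			 * final operation always has a last block to close
+			 * the hash
+			 */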
+ while (data_length > HASH_BLOCK_BYTES_LENGTH) {
+ u8 pTempAlignedBuffer[HASH_BLOCK_BYTES_LENGTH];
+
+				/*
+				 * We process one 64B buffer: first copy the
+				 * data to process into an aligned buffer
+				 */
+ if (copy_from_user(
+ pTempAlignedBuffer,
+ data,
+ HASH_BLOCK_BYTES_LENGTH))
+ return false;
+
+ /*Then we send this buffer to the hash
+ *hardware */
+ tf_digest_restore_registers(sha_state);
+ tf_digest_hw_perform_64b(
+ (u32 *) pTempAlignedBuffer,
+ sha_state->CTRL,
+ sha_state->bytes_processed);
+ tf_digest_save_registers(sha_state);
+
+ sha_state->bytes_processed =
+ INREG32(&sha1_md5_reg->DIGEST_COUNT);
+
+ /*Then we decrease the remaining data of 64B */
+ data += HASH_BLOCK_BYTES_LENGTH;
+ data_length -= HASH_BLOCK_BYTES_LENGTH;
+ }
+ }
+
+		/*(3) We check whether some data is left that could not be
+		 *processed yet because it is not enough to fill a 64B buffer */
+ if (data_length > 0) {
+ if (sha_state->chunk_length + data_length >
+ HASH_BLOCK_BYTES_LENGTH) {
+				/*Should never happen */
+				panic("tf_digest_update: chunk_length + data_length > "
+ "HASH_BLOCK_BYTES_LENGTH\n");
+ }
+
+			/*So we store the remaining new data in the chunk
+			 *buffer for later processing */
+ if (copy_from_user(
+ sha_state->chunk_buffer+sha_state->chunk_length,
+ data,
+ data_length))
+ return false;
+ sha_state->chunk_length += data_length;
+ }
+ }
+
+ dprintk(KERN_INFO "tf_digest_update: Done: "\
+ "Chunk=%u; Processed=%u\n",
+ sha_state->chunk_length, sha_state->bytes_processed);
+
+ return true;
+}
+
+/*------------------------------------------------------------------------- */
+
+static void tf_digest_hw_perform_64b(u32 *data,
+ u32 algo, u32 bytes_processed)
+{
+ u32 algo_constant = 0;
+
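+	/*
+	 * DIGEST_COUNT holds the number of bytes already hashed; restoring
+	 * it lets the accelerator resume a previously saved operation
+	 */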
+ OUTREG32(&sha1_md5_reg->DIGEST_COUNT, bytes_processed);
+
+ if (bytes_processed == 0) {
+ /* No bytes processed so far. Will use the algo constant instead
+ of previous digest */
+ algo_constant = 1 << 3;
+ }
+
+ OUTREG32(&sha1_md5_reg->MODE,
+ algo_constant | (algo & 0x6));
+ OUTREG32(&sha1_md5_reg->LENGTH, HASH_BLOCK_BYTES_LENGTH);
+
+ if (tf_crypto_wait_for_ready_bit(
+ (u32 *)&sha1_md5_reg->IRQSTATUS,
+ DIGEST_IRQSTATUS_INPUT_READY_BIT)
+ != PUBLIC_CRYPTO_OPERATION_SUCCESS) {
+ /* Crash the system as this should never occur */
+		panic("Wait too long for DIGEST HW accelerator "
+			"input data to be ready\n");
+ }
+
+ /*
+ *The data buffer is a buffer of 64 bytes.
+ */
+ OUTREG32(&sha1_md5_reg->DIN_0, data[0]);
+ OUTREG32(&sha1_md5_reg->DIN_1, data[1]);
+ OUTREG32(&sha1_md5_reg->DIN_2, data[2]);
+ OUTREG32(&sha1_md5_reg->DIN_3, data[3]);
+ OUTREG32(&sha1_md5_reg->DIN_4, data[4]);
+ OUTREG32(&sha1_md5_reg->DIN_5, data[5]);
+ OUTREG32(&sha1_md5_reg->DIN_6, data[6]);
+ OUTREG32(&sha1_md5_reg->DIN_7, data[7]);
+ OUTREG32(&sha1_md5_reg->DIN_8, data[8]);
+ OUTREG32(&sha1_md5_reg->DIN_9, data[9]);
+ OUTREG32(&sha1_md5_reg->DIN_10, data[10]);
+ OUTREG32(&sha1_md5_reg->DIN_11, data[11]);
+ OUTREG32(&sha1_md5_reg->DIN_12, data[12]);
+ OUTREG32(&sha1_md5_reg->DIN_13, data[13]);
+ OUTREG32(&sha1_md5_reg->DIN_14, data[14]);
+ OUTREG32(&sha1_md5_reg->DIN_15, data[15]);
+
+ /*
+ *Wait until the hash operation is finished.
+ */
+ tf_crypto_wait_for_ready_bit_infinitely(
+ (u32 *)&sha1_md5_reg->IRQSTATUS,
+ DIGEST_IRQSTATUS_OUTPUT_READY_BIT);
+}
+
+/*------------------------------------------------------------------------- */
+
+static bool tf_digest_hw_perform_dma(u8 *data, u32 nDataLength,
+ u32 algo, u32 bytes_processed)
+{
+ /*
+	 * Note: the DMA only sees physical addresses!
+ */
+
+ int dma_ch0;
+ struct omap_dma_channel_params ch0_parameters;
+ u32 length_loop = 0;
+ u32 algo_constant;
+ struct tf_device *dev = tf_get_device();
+
+ dprintk(KERN_INFO
+ "tf_digest_hw_perform_dma: Buffer=0x%08x/%u\n",
+ (u32)data, (u32)nDataLength);
+
+ /*lock the DMA */
+ mutex_lock(&dev->sm.dma_mutex);
+ if (tf_dma_request(&dma_ch0) != PUBLIC_CRYPTO_OPERATION_SUCCESS) {
+ mutex_unlock(&dev->sm.dma_mutex);
+ return false;
+ }
+
+ while (nDataLength > 0) {
+
+ algo_constant = 0;
+ if (bytes_processed == 0) {
+ /*No bytes processed so far. Will use the algo
+ *constant instead of previous digest */
+ algo_constant = 1 << 3;
+ }
+
+ /*check length */
+ if (nDataLength <= dev->dma_buffer_length)
+ length_loop = nDataLength;
+ else
+ length_loop = dev->dma_buffer_length;
+
+ /*
+ * Copy the data from the user input buffer into a preallocated
+		 * buffer which has the correct properties for efficient DMA
+ * transfers.
+ */
+ if (copy_from_user(dev->dma_buffer, data, length_loop)) {
+ omap_free_dma(dma_ch0);
+ mutex_unlock(&dev->sm.dma_mutex);
+ return false;
+ }
+
+ /*DMA1: Mem -> HASH */
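+		/* 0x80 is the offset of DIN_0, the accelerator's data FIFO */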
+ tf_dma_set_channel_common_params(&ch0_parameters,
+ length_loop / HASH_BLOCK_BYTES_LENGTH,
+ DMA_CEN_Elts_per_Frame_SHA,
+ DIGEST1_REGS_HW_ADDR + 0x80,
+ dev->dma_buffer_phys,
+ OMAP44XX_DMA_SHA2_DIN_P);
+
+ /*specific for Mem -> HWA */
+ ch0_parameters.src_amode = OMAP_DMA_AMODE_POST_INC;
+ ch0_parameters.dst_amode = OMAP_DMA_AMODE_CONSTANT;
+ ch0_parameters.src_or_dst_synch = OMAP_DMA_DST_SYNC;
+
+ omap_set_dma_params(dma_ch0, &ch0_parameters);
+
+ omap_set_dma_src_burst_mode(dma_ch0, OMAP_DMA_DATA_BURST_16);
+ omap_set_dma_dest_burst_mode(dma_ch0, OMAP_DMA_DATA_BURST_16);
+
+ OUTREG32(&sha1_md5_reg->DIGEST_COUNT, bytes_processed);
+ OUTREG32(&sha1_md5_reg->MODE,
+ algo_constant | (algo & 0x6));
+
+ /*
+ * Triggers operation
+ * Interrupt, Free Running + GO (DMA on)
+ */
+ OUTREG32(&sha1_md5_reg->SYSCONFIG,
+ INREG32(&sha1_md5_reg->SYSCONFIG) |
+ DIGEST_SYSCONFIG_PDMA_EN_BIT);
+ OUTREG32(&sha1_md5_reg->LENGTH, length_loop);
+
+ wmb();
+
+ tf_dma_start(dma_ch0, OMAP_DMA_BLOCK_IRQ);
+
+ tf_dma_wait(1);
+
+ OUTREG32(&sha1_md5_reg->SYSCONFIG, 0);
+
+ omap_clear_dma(dma_ch0);
+
+ data += length_loop;
+ nDataLength -= length_loop;
+ bytes_processed =
+ INREG32(&sha1_md5_reg->DIGEST_COUNT);
+ }
+
+	/*For safety reasons, let's clean the whole working buffer */
+	memset(dev->dma_buffer, 0, dev->dma_buffer_length);
+
+ /*release the DMA */
+ omap_free_dma(dma_ch0);
+
+ mutex_unlock(&dev->sm.dma_mutex);
+
+ /*
+	 * The DMA transfer is finished; now wait until the hash
+	 * operation is finished too.
+ */
+ tf_crypto_wait_for_ready_bit_infinitely(
+ (u32 *)&sha1_md5_reg->IRQSTATUS,
+ DIGEST_IRQSTATUS_CONTEXT_READY_BIT);
+
+ return true;
+}
+
+/*------------------------------------------------------------------------- */
+/*
+ * Static function performing a data digest, using the DMA for the data
+ * transfer.
+ *
+ * Inputs:
+ * data : pointer to the input data to process
+ * data_length : number of bytes to process
+ */
+static bool tf_digest_update_dma(
+ struct tf_crypto_sha_operation_state *sha_state,
+ u8 *data, u32 data_length)
+{
+ dprintk(KERN_INFO "tf_digest_update_dma\n");
+
+ if (sha_state->chunk_length != 0) {
+
+ u32 vLengthToComplete;
+
+ /*Fill the chunk first */
+		if (sha_state->chunk_length + data_length <=
+			HASH_BLOCK_BYTES_LENGTH) {
+
+ /*So we fill the chunk buffer with the new data */
+ if (copy_from_user(sha_state->chunk_buffer +
+ sha_state->chunk_length, data,
+ data_length))
+ return false;
+ sha_state->chunk_length += data_length;
+
+ /*We'll keep some data for the final */
+ return true;
+ }
+
+		vLengthToComplete = HASH_BLOCK_BYTES_LENGTH -
+			sha_state->chunk_length;
+
+ if (vLengthToComplete != 0) {
+			/*So we fill the chunk buffer with the new data to
+			 *complete it to 64B */
+ if (copy_from_user(sha_state->chunk_buffer +
+ sha_state->chunk_length, data,
+ vLengthToComplete))
+ return false;
+ }
+
+ /*Then we send this buffer to the HWA (no DMA) */
+ tf_digest_hw_perform_64b(
+ (u32 *)sha_state->chunk_buffer, sha_state->CTRL,
+ sha_state->bytes_processed);
+
+ sha_state->bytes_processed =
+ INREG32(&sha1_md5_reg->DIGEST_COUNT);
+
+ /*We have flushed the chunk so it is empty now */
+ sha_state->chunk_length = 0;
+
+		/*Update the data buffer depending on the data already
+		 *processed */
+ data += vLengthToComplete;
+ data_length -= vLengthToComplete;
+ }
+
+ if (data_length > HASH_BLOCK_BYTES_LENGTH) {
+
+		/*The DMA only handles lengths that are multiples of
+		 *64 bytes, hence the masking of the low 6 bits */
+ u32 vDmaProcessize = data_length & 0xFFFFFFC0;
+
+ if (vDmaProcessize == data_length) {
+ /*We keep one block for the final */
+ vDmaProcessize -= HASH_BLOCK_BYTES_LENGTH;
+ }
+
+ if (!tf_digest_hw_perform_dma(data, vDmaProcessize,
+ sha_state->CTRL, sha_state->bytes_processed))
+ return false;
+
+ sha_state->bytes_processed =
+ INREG32(&sha1_md5_reg->DIGEST_COUNT);
+ data += vDmaProcessize;
+ data_length -= vDmaProcessize;
+ }
+
+	/*At that point, at most 64 bytes are left to process */
+	if ((data_length == 0) || (data_length > HASH_BLOCK_BYTES_LENGTH))
+		/*Should never happen */
+ return false;
+
+ /*We now fill the chunk buffer with the remaining data */
+ if (copy_from_user(sha_state->chunk_buffer, data, data_length))
+ return false;
+ sha_state->chunk_length = data_length;
+
+ return true;
+}
+
+#ifdef CONFIG_SMC_KERNEL_CRYPTO
+static void tf_digest_init_operation(u32 alg,
+ struct tf_crypto_sha_operation_state *state)
+{
+ memset(state, 0, sizeof(struct tf_crypto_sha_operation_state));
+
+ state->CTRL = alg << 1;
+}
+
+static int static_Hash_HwReadDigest(u32 algo, u8 *out)
+{
+ u32 regs, tmp;
+ u32 idx = 0, i;
+
+ switch (algo) {
+ case DIGEST_CTRL_ALGO_MD5:
+ regs = 4;
+ break;
+ case DIGEST_CTRL_ALGO_SHA1:
+ regs = 5;
+ break;
+ case DIGEST_CTRL_ALGO_SHA224:
+ regs = 7;
+ break;
+ case DIGEST_CTRL_ALGO_SHA256:
+ regs = 8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
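+	/* Each 32-bit digest word is read back least significant byte first */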
+ for (i = 0; i < regs; i++) {
+ tmp = INREG32(&sha1_md5_reg->IDIGEST_A + i);
+
+ out[idx++] = (u8) ((tmp >> 0) & 0xff);
+ out[idx++] = (u8) ((tmp >> 8) & 0xff);
+ out[idx++] = (u8) ((tmp >> 16) & 0xff);
+ out[idx++] = (u8) ((tmp >> 24) & 0xff);
+ }
+
+ return 0;
+}
+
+static int tf_digest_final(struct tf_crypto_sha_operation_state *state,
+ u8 *out)
+{
+ u32 *data = (u32 *) state->chunk_buffer;
+
+ /* Hashing an empty string? */
+ if (state->bytes_processed + state->chunk_length == 0) {
+ switch (DIGEST_MODE_GET_ALGO(state->CTRL)) {
+ case DIGEST_CTRL_ALGO_MD5:
+ memcpy(out, md5OverEmptyString, HASH_MD5_LENGTH);
+ break;
+ case DIGEST_CTRL_ALGO_SHA1:
+ memcpy(out, sha1OverEmptyString, HASH_SHA1_LENGTH);
+ break;
+ case DIGEST_CTRL_ALGO_SHA224:
+ memcpy(out, sha224OverEmptyString, HASH_SHA224_LENGTH);
+ break;
+ case DIGEST_CTRL_ALGO_SHA256:
+ memcpy(out, sha256OverEmptyString, HASH_SHA256_LENGTH);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ tf_digest_restore_registers(state);
+
+ /*
+ * At this point, the chunk buffer should contain the last block of data
+ * needed for the final.
+ */
+ OUTREG32(&sha1_md5_reg->DIGEST_COUNT, state->bytes_processed);
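+	/*
+	 * 0x10 is DIGEST_MODE_CLOSE_HASH_BIT: the accelerator pads the last
+	 * block and closes the hash. Bit 3 (DIGEST_MODE_ALGO_CONST_BIT) is
+	 * set when no byte has been processed yet, so that the algorithm's
+	 * initial constants are used instead of a previous digest
+	 */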
+ OUTREG32(&sha1_md5_reg->MODE,
+ (state->CTRL & 0x6) | 0x10 |
+ (state->bytes_processed == 0) << 3);
+ OUTREG32(&sha1_md5_reg->LENGTH, state->chunk_length);
+
+ if (tf_crypto_wait_for_ready_bit(
+ (u32 *) &sha1_md5_reg->IRQSTATUS,
+ DIGEST_IRQSTATUS_INPUT_READY_BIT)
+ != PUBLIC_CRYPTO_OPERATION_SUCCESS) {
+ /* Crash the system as this should never occur */
+		panic("Wait too long for DIGEST HW accelerator "
+			"input data to be ready\n");
+ }
+
+ OUTREG32(&sha1_md5_reg->DIN_0, data[0]);
+ OUTREG32(&sha1_md5_reg->DIN_1, data[1]);
+ OUTREG32(&sha1_md5_reg->DIN_2, data[2]);
+ OUTREG32(&sha1_md5_reg->DIN_3, data[3]);
+ OUTREG32(&sha1_md5_reg->DIN_4, data[4]);
+ OUTREG32(&sha1_md5_reg->DIN_5, data[5]);
+ OUTREG32(&sha1_md5_reg->DIN_6, data[6]);
+ OUTREG32(&sha1_md5_reg->DIN_7, data[7]);
+ OUTREG32(&sha1_md5_reg->DIN_8, data[8]);
+ OUTREG32(&sha1_md5_reg->DIN_9, data[9]);
+ OUTREG32(&sha1_md5_reg->DIN_10, data[10]);
+ OUTREG32(&sha1_md5_reg->DIN_11, data[11]);
+ OUTREG32(&sha1_md5_reg->DIN_12, data[12]);
+ OUTREG32(&sha1_md5_reg->DIN_13, data[13]);
+ OUTREG32(&sha1_md5_reg->DIN_14, data[14]);
+ OUTREG32(&sha1_md5_reg->DIN_15, data[15]);
+
+ /* Wait till the hash operation is finished */
+ tf_crypto_wait_for_ready_bit_infinitely(
+ (u32 *) &sha1_md5_reg->IRQSTATUS,
+ DIGEST_IRQSTATUS_OUTPUT_READY_BIT);
+
+ return static_Hash_HwReadDigest(DIGEST_MODE_GET_ALGO(state->CTRL), out);
+}
+
+/*
+ * Digest HWA registration into kernel crypto framework
+ */
+
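+/*
+ * A minimal sketch of how a kernel-side caller could reach these transforms
+ * through the standard shash API once they are registered (illustrative
+ * only, error handling omitted):
+ *
+ *	struct crypto_shash *tfm = crypto_alloc_shash("sha1", 0, 0);
+ *	struct shash_desc *desc = kmalloc(sizeof(*desc) +
+ *		crypto_shash_descsize(tfm), GFP_KERNEL);
+ *	desc->tfm = tfm;
+ *	crypto_shash_digest(desc, data, len, out);
+ *	kfree(desc);
+ *	crypto_free_shash(tfm);
+ *
+ * The cra_priority of 999 makes these implementations preferred over the
+ * generic software hashes when "md5", "sha1", "sha224" or "sha256" is
+ * requested.
+ */
+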
+static int digest_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct tf_crypto_sha_operation_state *state = shash_desc_ctx(desc);
+
+ /* Make sure SHA/MD5 HWA is accessible */
+ tf_delayed_secure_resume();
+
+ tf_crypto_lock_hwa(PUBLIC_CRYPTO_HWA_SHA, LOCK_HWA);
+
+ tf_crypto_enable_clock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG);
+
+ tf_digest_update(state, (u8 *) data, len);
+
+ tf_crypto_disable_clock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG);
+
+ tf_crypto_lock_hwa(PUBLIC_CRYPTO_HWA_SHA, UNLOCK_HWA);
+
+ return 0;
+}
+
+static int digest_final(struct shash_desc *desc, u8 *out)
+{
+ int ret;
+ struct tf_crypto_sha_operation_state *state = shash_desc_ctx(desc);
+
+ /* Make sure SHA/MD5 HWA is accessible */
+ tf_delayed_secure_resume();
+
+ tf_crypto_lock_hwa(PUBLIC_CRYPTO_HWA_SHA, LOCK_HWA);
+
+ tf_crypto_enable_clock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG);
+
+ ret = tf_digest_final(state, out);
+
+ tf_crypto_disable_clock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG);
+
+ tf_crypto_lock_hwa(PUBLIC_CRYPTO_HWA_SHA, UNLOCK_HWA);
+
+ return ret;
+}
+
+static int digest_import(struct shash_desc *desc, const void *in)
+{
+ struct tf_crypto_sha_operation_state *state = shash_desc_ctx(desc);
+
+ memcpy(state, in, sizeof(*state));
+ return 0;
+}
+
+static int digest_export(struct shash_desc *desc, void *out)
+{
+ struct tf_crypto_sha_operation_state *state = shash_desc_ctx(desc);
+
+ memcpy(out, state, sizeof(*state));
+ return 0;
+}
+
+/* MD5 */
+static int md5_init(struct shash_desc *desc)
+{
+ struct tf_crypto_sha_operation_state *state = shash_desc_ctx(desc);
+
+ tf_digest_init_operation(DIGEST_CTRL_ALGO_MD5, state);
+
+ return 0;
+}
+
+static struct shash_alg smc_md5_alg = {
+ .digestsize = HASH_MD5_LENGTH,
+ .init = md5_init,
+ .update = digest_update,
+ .final = digest_final,
+ .export = digest_export,
+ .import = digest_import,
+ .descsize = sizeof(struct tf_crypto_sha_operation_state),
+ .statesize = sizeof(struct tf_crypto_sha_operation_state),
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name = "md5-smc",
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_priority = 999,
+ .cra_blocksize = HASH_BLOCK_BYTES_LENGTH,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+/* SHA1 */
+static int sha1_init(struct shash_desc *desc)
+{
+ struct tf_crypto_sha_operation_state *state = shash_desc_ctx(desc);
+
+ tf_digest_init_operation(DIGEST_CTRL_ALGO_SHA1, state);
+
+ return 0;
+}
+
+static struct shash_alg smc_sha1_alg = {
+ .digestsize = HASH_SHA1_LENGTH,
+ .init = sha1_init,
+ .update = digest_update,
+ .final = digest_final,
+ .export = digest_export,
+ .import = digest_import,
+ .descsize = sizeof(struct tf_crypto_sha_operation_state),
+ .statesize = sizeof(struct tf_crypto_sha_operation_state),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-smc",
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_priority = 999,
+ .cra_blocksize = HASH_BLOCK_BYTES_LENGTH,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+/* SHA224 */
+static int sha224_init(struct shash_desc *desc)
+{
+ struct tf_crypto_sha_operation_state *state = shash_desc_ctx(desc);
+
+ tf_digest_init_operation(DIGEST_CTRL_ALGO_SHA224, state);
+
+ return 0;
+}
+
+static struct shash_alg smc_sha224_alg = {
+ .digestsize = HASH_SHA224_LENGTH,
+ .init = sha224_init,
+ .update = digest_update,
+ .final = digest_final,
+ .export = digest_export,
+ .import = digest_import,
+ .descsize = sizeof(struct tf_crypto_sha_operation_state),
+ .statesize = sizeof(struct tf_crypto_sha_operation_state),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "sha224-smc",
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_priority = 999,
+ .cra_blocksize = HASH_BLOCK_BYTES_LENGTH,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+/* SHA256 */
+static int sha256_init(struct shash_desc *desc)
+{
+ struct tf_crypto_sha_operation_state *state = shash_desc_ctx(desc);
+
+ tf_digest_init_operation(DIGEST_CTRL_ALGO_SHA256, state);
+
+ return 0;
+}
+
+static struct shash_alg smc_sha256_alg = {
+ .digestsize = HASH_SHA256_LENGTH,
+ .init = sha256_init,
+ .update = digest_update,
+ .final = digest_final,
+ .export = digest_export,
+ .import = digest_import,
+ .descsize = sizeof(struct tf_crypto_sha_operation_state),
+ .statesize = sizeof(struct tf_crypto_sha_operation_state),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-smc",
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_priority = 999,
+ .cra_blocksize = HASH_BLOCK_BYTES_LENGTH,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+int register_smc_public_crypto_digest(void)
+{
+ int ret;
+
+ dprintk(KERN_INFO "SMC: Registering digest algorithms\n");
+
+ ret = crypto_register_shash(&smc_md5_alg);
+ if (ret)
+ return ret;
+
+ ret = crypto_register_shash(&smc_sha1_alg);
+ if (ret)
+ goto sha1_err;
+
+ ret = crypto_register_shash(&smc_sha224_alg);
+ if (ret)
+ goto sha224_err;
+
+ ret = crypto_register_shash(&smc_sha256_alg);
+ if (ret)
+ goto sha256_err;
+
+ return 0;
+
+sha256_err:
+ crypto_unregister_shash(&smc_sha224_alg);
+sha224_err:
+ crypto_unregister_shash(&smc_sha1_alg);
+sha1_err:
+ crypto_unregister_shash(&smc_md5_alg);
+ return ret;
+}
+
+void unregister_smc_public_crypto_digest(void)
+{
+ dprintk(KERN_INFO "SMC: Unregistering digest algorithms\n");
+
+ crypto_unregister_shash(&smc_md5_alg);
+ crypto_unregister_shash(&smc_sha1_alg);
+ crypto_unregister_shash(&smc_sha224_alg);
+ crypto_unregister_shash(&smc_sha256_alg);
+}
+#endif
diff --git a/security/smc/tf_defs.h b/security/smc/tf_defs.h
new file mode 100644
index 0000000..a8a5ce5
--- /dev/null
+++ b/security/smc/tf_defs.h
@@ -0,0 +1,544 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TF_DEFS_H__
+#define __TF_DEFS_H__
+
+#include <asm/atomic.h>
+#include <linux/version.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+#ifdef CONFIG_HAS_WAKELOCK
+#include <linux/wakelock.h>
+#endif
+
+#include "tf_protocol.h"
+
+/*----------------------------------------------------------------------------*/
+
+#define SIZE_1KB 0x400
+
+/*
+ * Maximum number of shared memory blocks that can be registered in a connection
+ */
+#define TF_SHMEM_MAX_COUNT (64)
+
+/*
+ * Describes the possible types of shared memories
+ *
+ * TF_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM :
+ * The descriptor describes a registered shared memory.
+ * Its coarse pages are preallocated when initializing the
+ * connection
+ * TF_SHMEM_TYPE_REGISTERED_SHMEM :
+ * The descriptor describes a registered shared memory.
+ * Its coarse pages are not preallocated
+ * TF_SHMEM_TYPE_PM_HIBERNATE :
+ * The descriptor describes a power management shared memory.
+ */
+enum TF_SHMEM_TYPE {
+ TF_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM = 0,
+ TF_SHMEM_TYPE_REGISTERED_SHMEM,
+ TF_SHMEM_TYPE_PM_HIBERNATE,
+};
+
+
+/*
+ * This structure contains a pointer on a coarse page table
+ */
+struct tf_coarse_page_table {
+ /*
+ * Identifies the coarse page table descriptor in
+ * free_coarse_page_tables list
+ */
+ struct list_head list;
+
+ /*
+ * The address of the coarse page table
+ */
+ u32 *descriptors;
+
+ /*
+ * The address of the array containing this coarse page table
+ */
+ struct tf_coarse_page_table_array *parent;
+};
+
+
+#define TF_PAGE_DESCRIPTOR_TYPE_NORMAL 0
+#define TF_PAGE_DESCRIPTOR_TYPE_PREALLOCATED 1
+
+/*
+ * This structure describes an array of up to 4 coarse page tables
+ * allocated within a single 4KB page.
+ */
+struct tf_coarse_page_table_array {
+ /*
+ * identifies the element in the coarse_page_table_arrays list
+ */
+ struct list_head list;
+
+ /*
+ * Type of page descriptor
+ * can take any of TF_PAGE_DESCRIPTOR_TYPE_XXX value
+ */
+ u32 type;
+
+ struct tf_coarse_page_table coarse_page_tables[4];
+
+ /*
+ * A counter of the number of coarse pages currently used
+ * the max value should be 4 (one coarse page table is 1KB while one
+ * page is 4KB)
+ */
+ u8 ref_count;
+};
+
+
+/*
+ * This structure describes a list of coarse page table arrays
+ * with some of the coarse page tables free. It is used
+ * when the driver needs to allocate a new coarse page
+ * table.
+ */
+struct tf_coarse_page_table_allocation_context {
+ /*
+ * The spin lock protecting concurrent access to the structure.
+ */
+ spinlock_t lock;
+
+ /*
+ * The list of allocated coarse page table arrays
+ */
+ struct list_head coarse_page_table_arrays;
+
+ /*
+ * The list of free coarse page tables
+ */
+ struct list_head free_coarse_page_tables;
+};
+
+
+/*
+ * Fully describes a shared memory block
+ */
+struct tf_shmem_desc {
+ /*
+ * Identifies the shared memory descriptor in the list of free shared
+ * memory descriptors
+ */
+ struct list_head list;
+
+ /*
+ * Identifies the type of shared memory descriptor
+ */
+ enum TF_SHMEM_TYPE type;
+
+ /*
+ * The identifier of the block of shared memory, as returned by the
+ * Secure World.
+ * This identifier is block field of a REGISTER_SHARED_MEMORY answer
+ */
+ u32 block_identifier;
+
+ /* Client buffer */
+ u8 *pBuffer;
+
+ /* Up to eight coarse page table context */
+ struct tf_coarse_page_table *coarse_pg_table[TF_MAX_COARSE_PAGES];
+
+ u32 coarse_pg_table_count;
+
+ /* Reference counter */
+ atomic_t ref_count;
+};
+
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ * This structure describes the communication with the Secure World
+ *
+ * Note that this driver supports only one instance of the Secure World
+ */
+struct tf_comm {
+ /*
+ * The spin lock protecting concurrent access to the structure.
+ */
+ spinlock_t lock;
+
+ /*
+ * Bit vector with the following possible flags:
+ * - TF_COMM_FLAG_IRQ_REQUESTED: If set, indicates that
+	 *      the IRQ has been successfully requested.
+ * - TF_COMM_FLAG_TERMINATING: If set, indicates that the
+ * communication with the Secure World is being terminated.
+ * Transmissions to the Secure World are not permitted
+ * - TF_COMM_FLAG_W3B_ALLOCATED: If set, indicates that the
+ * W3B buffer has been allocated.
+ *
+ * This bit vector must be accessed with the kernel's atomic bitwise
+ * operations.
+ */
+ unsigned long flags;
+
+ /*
+ * The virtual address of the L1 shared buffer.
+ */
+ struct tf_l1_shared_buffer *pBuffer;
+
+ /*
+ * The wait queue the client threads are waiting on.
+ */
+ wait_queue_head_t wait_queue;
+
+#ifdef CONFIG_TF_TRUSTZONE
+ /*
+ * The interrupt line used by the Secure World.
+ */
+ int soft_int_irq;
+
+ /* ----- W3B ----- */
+ /* shared memory descriptor to identify the W3B */
+ struct tf_shmem_desc w3b_shmem_desc;
+
+ /* Virtual address of the kernel allocated shared memory */
+ u32 w3b;
+
+ /* offset of data in shared memory coarse pages */
+ u32 w3b_shmem_offset;
+
+ u32 w3b_shmem_size;
+
+ struct tf_coarse_page_table_allocation_context
+ w3b_cpt_alloc_context;
+#endif
+#ifdef CONFIG_TF_ZEBRA
+ /*
+ * The SE SDP can only be initialized once...
+ */
+ int se_initialized;
+
+ /* Virtual address of the L0 communication buffer */
+ void *init_shared_buffer;
+
+ /*
+ * Lock to be held by a client when executing an RPC
+ */
+ struct mutex rpc_mutex;
+
+ /*
+ * Lock to protect concurrent accesses to DMA channels
+ */
+ struct mutex dma_mutex;
+#endif
+};
+
+
+#define TF_COMM_FLAG_IRQ_REQUESTED (0)
+#define TF_COMM_FLAG_PA_AVAILABLE (1)
+#define TF_COMM_FLAG_TERMINATING (2)
+#define TF_COMM_FLAG_W3B_ALLOCATED (3)
+#define TF_COMM_FLAG_L1_SHARED_ALLOCATED (4)
+
+/*----------------------------------------------------------------------------*/
+
+struct tf_device_stats {
+ struct kobject kobj;
+
+ struct kobj_type kobj_type;
+
+ struct attribute kobj_stat_attribute;
+
+ struct attribute *kobj_attribute_list[2];
+
+ atomic_t stat_pages_allocated;
+ atomic_t stat_memories_allocated;
+ atomic_t stat_pages_locked;
+};
+
+/*
+ * This structure describes the information about one device handled by the
+ * driver. Note that the driver supports only a single device; see the global
+ * variable g_tf_dev.
+ */
+struct tf_device {
+ /*
+ * The device number for the device.
+ */
+ dev_t dev_number;
+
+ /*
+ * Interfaces the char device with the kernel.
+ */
+ struct cdev cdev;
+
+#ifdef CONFIG_TF_TEEC
+ struct cdev cdev_teec;
+#endif
+
+#ifdef CONFIG_TF_ZEBRA
+ struct cdev cdev_ctrl;
+
+ /*
+ * Globals for CUS
+ */
+ /* Current key handles loaded in HWAs */
+ u32 aes1_key_context;
+ u32 des_key_context;
+ bool sham1_is_public;
+
+ /* Object used to serialize HWA accesses */
+ struct semaphore aes1_sema;
+ struct semaphore des_sema;
+ struct semaphore sha_sema;
+
+ /*
+ * An aligned and correctly shaped pre-allocated buffer used for DMA
+ * transfers
+ */
+ u32 dma_buffer_length;
+ u8 *dma_buffer;
+ dma_addr_t dma_buffer_phys;
+
+ /* Workspace allocated at boot time and reserved to the Secure World */
+ u32 workspace_addr;
+ u32 workspace_size;
+
+ /*
+ * A Mutex to provide exclusive locking of the ioctl()
+ */
+ struct mutex dev_mutex;
+#endif
+
+ /*
+ * Communications with the SM.
+ */
+ struct tf_comm sm;
+
+ /*
+ * Lists the connections attached to this device. A connection is
+ * created each time a user space application "opens" a file descriptor
+ * on the driver
+ */
+ struct list_head connection_list;
+
+ /*
+ * The spin lock used to protect concurrent access to the connection
+ * list.
+ */
+ spinlock_t connection_list_lock;
+
+ struct tf_device_stats stats;
+};
+
+/*----------------------------------------------------------------------------*/
+/*
+ * This type describes a connection state.
+ * This is used to determine whether a message is valid or not.
+ *
+ * Messages are only valid in a certain device state.
+ * Messages may be invalidated between the start of the ioctl call and the
+ * moment the message is sent to the Secure World.
+ *
+ * TF_CONN_STATE_NO_DEVICE_CONTEXT :
+ * The connection has no DEVICE_CONTEXT created and no
+ * CREATE_DEVICE_CONTEXT being processed by the Secure World
+ * TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT :
+ * The connection has a CREATE_DEVICE_CONTEXT being processed by the Secure
+ * World
+ * TF_CONN_STATE_VALID_DEVICE_CONTEXT :
+ * The connection has a DEVICE_CONTEXT created and no
+ * DESTROY_DEVICE_CONTEXT is being processed by the Secure World
+ * TF_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT :
+ * The connection has a DESTROY_DEVICE_CONTEXT being processed by the Secure
+ * World
+ */
+enum TF_CONN_STATE {
+ TF_CONN_STATE_NO_DEVICE_CONTEXT = 0,
+ TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT,
+ TF_CONN_STATE_VALID_DEVICE_CONTEXT,
+ TF_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT
+};
+
+
+/*
+ * This type describes the status of the command.
+ *
+ * PENDING:
+ * The initial state; the command has not been sent yet.
+ * SENT:
+ * The command has been sent, we are waiting for an answer.
+ * ABORTED:
+ * The command cannot be sent because the device context is invalid.
+ * Note that this only covers the case where some other thread
+ * sent a DESTROY_DEVICE_CONTEXT command.
+ */
+enum TF_COMMAND_STATE {
+ TF_COMMAND_STATE_PENDING = 0,
+ TF_COMMAND_STATE_SENT,
+ TF_COMMAND_STATE_ABORTED
+};
+
+/*
+ * The origin of connection parameters such as login data and
+ * memory reference pointers.
+ *
+ * PROCESS: the calling process. All arguments must be validated.
+ * KERNEL: kernel code. All arguments can be trusted by this driver.
+ */
+enum TF_CONNECTION_OWNER {
+ TF_CONNECTION_OWNER_PROCESS = 0,
+ TF_CONNECTION_OWNER_KERNEL,
+};
+
+
+/*
+ * This structure describes a connection to the driver
+ * A connection is created each time an application opens a file descriptor on
+ * the driver
+ */
+struct tf_connection {
+ /*
+ * Identifies the connection in the list of the connections attached to
+ * the same device.
+ */
+ struct list_head list;
+
+ /*
+ * State of the connection.
+ */
+ enum TF_CONN_STATE state;
+
+ /*
+ * A pointer to the corresponding device structure
+ */
+ struct tf_device *dev;
+
+ /*
+ * A spinlock to use to access state
+ */
+ spinlock_t state_lock;
+
+ /*
+ * Counts the number of operations currently pending on the connection.
+ * (for debug only)
+ */
+ atomic_t pending_op_count;
+
+ /*
+ * A handle for the device context
+ */
+ u32 device_context;
+
+ /*
+ * Lists the used shared memory descriptors
+ */
+ struct list_head used_shmem_list;
+
+ /*
+ * Lists the free shared memory descriptors
+ */
+ struct list_head free_shmem_list;
+
+ /*
+ * A mutex to use to access this structure
+ */
+ struct mutex shmem_mutex;
+
+ /*
+ * Counts the number of shared memories registered.
+ */
+ atomic_t shmem_count;
+
+ /*
+	 * Array of VMAs used to retrieve memory properties when
+ * registering shared memory through REGISTER_SHARED_MEMORY
+ * messages
+ */
+ struct vm_area_struct **vmas;
+
+ /*
+ * coarse page table allocation context
+ */
+ struct tf_coarse_page_table_allocation_context cpt_alloc_context;
+
+ /* The origin of connection parameters such as login data and
+ memory reference pointers. */
+ enum TF_CONNECTION_OWNER owner;
+
+#ifdef CONFIG_TF_ZEBRA
+ /* Lists all the Cryptoki Update Shortcuts */
+ struct list_head shortcut_list;
+
+ /* Lock to protect concurrent accesses to shortcut_list */
+ spinlock_t shortcut_list_lock;
+#endif
+};
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ * The operation_id field of a message points to this structure.
+ * It is used to identify the thread that triggered the message transmission
+ * Whoever reads an answer can wake up that thread using the completion event
+ */
+struct tf_answer_struct {
+ bool answer_copied;
+ union tf_answer *answer;
+};
+
+/*----------------------------------------------------------------------------*/
+
+/**
+ * The ASCII-C string representation of the base name of the devices managed by
+ * this driver.
+ */
+#define TF_DEVICE_BASE_NAME "tf_driver"
+
+
+/**
+ * The minor number of the registered character device driver (the major
+ * number is configurable). Only 1 instance of the driver is supported.
+ */
+#define TF_DEVICE_MINOR_NUMBER (0)
+
+struct tf_device *tf_get_device(void);
+
+#define CLEAN_CACHE_CFG_MASK (~0xC) /* 1111 0011 */
+
+/*----------------------------------------------------------------------------*/
+/*
+ * Kernel Differences
+ */
+
+#ifdef CONFIG_ANDROID
+#define GROUP_INFO get_current_groups()
+#else
+#define GROUP_INFO (current->group_info)
+#endif
+
+#endif /* !defined(__TF_DEFS_H__) */
diff --git a/security/smc/tf_device.c b/security/smc/tf_device.c
new file mode 100644
index 0000000..86313de
--- /dev/null
+++ b/security/smc/tf_device.c
@@ -0,0 +1,652 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <asm/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/page-flags.h>
+#include <linux/pm.h>
+#include <linux/syscore_ops.h>
+#include <linux/vmalloc.h>
+#include <linux/signal.h>
+#ifdef CONFIG_ANDROID
+#include <linux/device.h>
+#endif
+
+#include "tf_protocol.h"
+#include "tf_defs.h"
+#include "tf_util.h"
+#include "tf_conn.h"
+#include "tf_comm.h"
+#ifdef CONFIG_TF_ZEBRA
+#include <plat/cpu.h>
+#include "tf_zebra.h"
+#endif
+
+#include "s_version.h"
+
+/*----------------------------------------------------------------------------
+ * Forward Declarations
+ *----------------------------------------------------------------------------*/
+
+/*
+ * Creates and registers the device to be managed by the specified driver.
+ *
+ * Returns zero upon successful completion, or an appropriate error code upon
+ * failure.
+ */
+static int tf_device_register(void);
+
+
+/*
+ * Implements the device Open callback.
+ */
+static int tf_device_open(
+ struct inode *inode,
+ struct file *file);
+
+
+/*
+ * Implements the device Release callback.
+ */
+static int tf_device_release(
+ struct inode *inode,
+ struct file *file);
+
+
+/*
+ * Implements the device ioctl callback.
+ */
+static long tf_device_ioctl(
+ struct file *file,
+ unsigned int ioctl_num,
+ unsigned long ioctl_param);
+
+
+/*
+ * Implements the device shutdown callback.
+ */
+static void tf_device_shutdown(void);
+
+
+/*
+ * Implements the device suspend callback.
+ */
+static int tf_device_suspend(void);
+
+
+/*
+ * Implements the device resume callback.
+ */
+static void tf_device_resume(void);
+
+
+/*---------------------------------------------------------------------------
+ * Module Parameters
+ *---------------------------------------------------------------------------*/
+
+/*
+ * The device major number used to register a unique character device driver.
+ * Let the default value be 122
+ */
+static int device_major_number = 122;
+
+module_param(device_major_number, int, 0000);
+MODULE_PARM_DESC(device_major_number,
+ "The device major number used to register a unique character "
+ "device driver");
+
+#ifdef CONFIG_TF_TRUSTZONE
+/**
+ * The softint interrupt line used by the Secure World.
+ */
+static int soft_interrupt = -1;
+
+module_param(soft_interrupt, int, 0000);
+MODULE_PARM_DESC(soft_interrupt,
+ "The softint interrupt line used by the Secure world");
+#endif
+
+#ifdef CONFIG_ANDROID
+static struct class *tf_class;
+#endif
+
+/*----------------------------------------------------------------------------
+ * Global Variables
+ *----------------------------------------------------------------------------*/
+
+/*
+ * tf_driver character device definitions.
+ * read and write methods are not defined
+ * and will return an error if used by user space
+ */
+static const struct file_operations g_tf_device_file_ops = {
+ .owner = THIS_MODULE,
+ .open = tf_device_open,
+ .release = tf_device_release,
+ .unlocked_ioctl = tf_device_ioctl,
+ .llseek = no_llseek,
+};
+
+/* The single device supported by this driver */
+static struct tf_device g_tf_dev = {0, };
+
+/*----------------------------------------------------------------------------
+ * Implementations
+ *----------------------------------------------------------------------------*/
+
+struct tf_device *tf_get_device(void)
+{
+ return &g_tf_dev;
+}
+
+/*
+ * displays the driver stats
+ */
+static ssize_t kobject_show(struct kobject *kobj,
+ struct attribute *attribute, char *buf)
+{
+ struct tf_device_stats *dev_stats = &g_tf_dev.stats;
+ u32 pages_allocated;
+ u32 pages_locked;
+ u32 memories_allocated;
+
+ memories_allocated =
+ atomic_read(&(dev_stats->stat_memories_allocated));
+ pages_allocated =
+ atomic_read(&(dev_stats->stat_pages_allocated));
+ pages_locked = atomic_read(&(dev_stats->stat_pages_locked));
+
+ /*
+ * AFY: could we add the number of context switches (call to the SMI
+ * instruction)
+ */
+
+ return snprintf(buf, PAGE_SIZE,
+ "stat.memories.allocated: %d\n"
+ "stat.pages.allocated: %d\n"
+ "stat.pages.locked: %d\n",
+ memories_allocated,
+ pages_allocated,
+ pages_locked);
+}
+
+static const struct sysfs_ops kobj_sysfs_operations = {
+ .show = kobject_show,
+};
+
+/*----------------------------------------------------------------------------*/
+
+static const struct syscore_ops g_tf_syscore_ops = {
+ .shutdown = tf_device_shutdown,
+ .suspend = tf_device_suspend,
+ .resume = tf_device_resume,
+};
+
+/*
+ * First routine called when the kernel module is loaded
+ */
+static int __init tf_device_register(void)
+{
+ int error;
+ struct tf_device *dev = &g_tf_dev;
+ struct tf_device_stats *dev_stats = &dev->stats;
+
+ dprintk(KERN_INFO "tf_device_register()\n");
+
+ /*
+ * Initialize the device
+ */
+ dev->dev_number = MKDEV(device_major_number,
+ TF_DEVICE_MINOR_NUMBER);
+ cdev_init(&dev->cdev, &g_tf_device_file_ops);
+ dev->cdev.owner = THIS_MODULE;
+
+ INIT_LIST_HEAD(&dev->connection_list);
+ spin_lock_init(&dev->connection_list_lock);
+
+ /* register the sysfs object driver stats */
+ dev_stats->kobj_type.sysfs_ops = &kobj_sysfs_operations;
+
+ dev_stats->kobj_stat_attribute.name = "info";
+ dev_stats->kobj_stat_attribute.mode = S_IRUGO;
+ dev_stats->kobj_attribute_list[0] =
+ &dev_stats->kobj_stat_attribute;
+
+	dev_stats->kobj_type.default_attrs =
+		dev_stats->kobj_attribute_list;
+	error = kobject_init_and_add(&(dev_stats->kobj),
+ &(dev_stats->kobj_type), NULL, "%s",
+ TF_DEVICE_BASE_NAME);
+ if (error) {
+ kobject_put(&dev_stats->kobj);
+ goto kobject_init_and_add_failed;
+ }
+
+ register_syscore_ops((struct syscore_ops *)&g_tf_syscore_ops);
+
+ /*
+ * Register the char device.
+ */
+ printk(KERN_INFO "Registering char device %s (%u:%u)\n",
+ TF_DEVICE_BASE_NAME,
+ MAJOR(dev->dev_number),
+ MINOR(dev->dev_number));
+ error = register_chrdev_region(dev->dev_number, 1,
+ TF_DEVICE_BASE_NAME);
+ if (error != 0) {
+ printk(KERN_ERR "tf_device_register():"
+ " register_chrdev_region failed (error %d)!\n",
+ error);
+ goto register_chrdev_region_failed;
+ }
+
+ error = cdev_add(&dev->cdev, dev->dev_number, 1);
+ if (error != 0) {
+ printk(KERN_ERR "tf_device_register(): "
+ "cdev_add failed (error %d)!\n",
+ error);
+ goto cdev_add_failed;
+ }
+
+ /*
+ * Initialize the communication with the Secure World.
+ */
+#ifdef CONFIG_TF_TRUSTZONE
+ dev->sm.soft_int_irq = soft_interrupt;
+#endif
+ error = tf_init(&g_tf_dev.sm);
+ if (error != S_SUCCESS) {
+ dprintk(KERN_ERR "tf_device_register(): "
+ "tf_init failed (error %d)!\n",
+ error);
+ goto init_failed;
+ }
+
+#ifdef CONFIG_ANDROID
+ tf_class = class_create(THIS_MODULE, TF_DEVICE_BASE_NAME);
+ device_create(tf_class, NULL,
+ dev->dev_number,
+ NULL, TF_DEVICE_BASE_NAME);
+#endif
+
+#ifdef CONFIG_TF_ZEBRA
+ /*
+ * Initializes the /dev/tf_ctrl device node.
+ */
+ error = tf_ctrl_device_register();
+ if (error)
+ goto init_failed;
+#endif
+
+#ifdef CONFIG_BENCH_SECURE_CYCLE
+ run_bogo_mips();
+ address_cache_property((unsigned long) &tf_device_register);
+#endif
+ /*
+ * Successful completion.
+ */
+
+ dprintk(KERN_INFO "tf_device_register(): Success\n");
+ return 0;
+
+ /*
+ * Error: undo all operations in the reverse order
+ */
+init_failed:
+ cdev_del(&dev->cdev);
+cdev_add_failed:
+ unregister_chrdev_region(dev->dev_number, 1);
+register_chrdev_region_failed:
+ unregister_syscore_ops((struct syscore_ops *)&g_tf_syscore_ops);
+kobject_init_and_add_failed:
+ kobject_del(&g_tf_dev.stats.kobj);
+
+ dprintk(KERN_INFO "tf_device_register(): Failure (error %d)\n",
+ error);
+ return error;
+}
+
+/*----------------------------------------------------------------------------*/
+
+static int tf_device_open(struct inode *inode, struct file *file)
+{
+ int error;
+ struct tf_device *dev = &g_tf_dev;
+ struct tf_connection *connection = NULL;
+
+ dprintk(KERN_INFO "tf_device_open(%u:%u, %p)\n",
+ imajor(inode), iminor(inode), file);
+
+ /* Dummy lseek for non-seekable driver */
+ error = nonseekable_open(inode, file);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_device_open(%p): "
+ "nonseekable_open failed (error %d)!\n",
+ file, error);
+ goto error;
+ }
+
+#ifndef CONFIG_ANDROID
+ /*
+	 * Check file flags. We only authorize the O_RDWR access
+ */
+ if (file->f_flags != O_RDWR) {
+ dprintk(KERN_ERR "tf_device_open(%p): "
+ "Invalid access mode %u\n",
+ file, file->f_flags);
+ error = -EACCES;
+ goto error;
+ }
+#endif
+
+ /*
+ * Open a new connection.
+ */
+
+ error = tf_open(dev, file, &connection);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_device_open(%p): "
+ "tf_open failed (error %d)!\n",
+ file, error);
+ goto error;
+ }
+
+ file->private_data = connection;
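+	/*
+	 * tf_conn_from_file() retrieves this connection pointer back in the
+	 * release and ioctl callbacks
+	 */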
+
+ /*
+	 * Send the CreateDeviceContext command to the Secure World
+ */
+ error = tf_create_device_context(connection);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_device_open(%p): "
+ "tf_create_device_context failed (error %d)!\n",
+ file, error);
+ goto error1;
+ }
+
+ /*
+ * Successful completion.
+ */
+
+ dprintk(KERN_INFO "tf_device_open(%p): Success (connection=%p)\n",
+ file, connection);
+ return 0;
+
+ /*
+ * Error handling.
+ */
+
+error1:
+ tf_close(connection);
+error:
+ dprintk(KERN_INFO "tf_device_open(%p): Failure (error %d)\n",
+ file, error);
+ return error;
+}
+
+/*----------------------------------------------------------------------------*/
+
+static int tf_device_release(struct inode *inode, struct file *file)
+{
+ struct tf_connection *connection;
+
+ dprintk(KERN_INFO "tf_device_release(%u:%u, %p)\n",
+ imajor(inode), iminor(inode), file);
+
+ connection = tf_conn_from_file(file);
+ tf_close(connection);
+
+ dprintk(KERN_INFO "tf_device_release(%p): Success\n", file);
+ return 0;
+}
+
+/*----------------------------------------------------------------------------*/
+
+static long tf_device_ioctl(struct file *file, unsigned int ioctl_num,
+ unsigned long ioctl_param)
+{
+ int result = S_SUCCESS;
+ struct tf_connection *connection;
+ union tf_command command;
+ struct tf_command_header header;
+ union tf_answer answer;
+ u32 command_size;
+ u32 answer_size;
+ void *user_answer;
+
+ dprintk(KERN_INFO "tf_device_ioctl(%p, %u, %p)\n",
+ file, ioctl_num, (void *) ioctl_param);
+
+ switch (ioctl_num) {
+ case IOCTL_TF_GET_VERSION:
+ /* ioctl is asking for the driver interface version */
+ result = TF_DRIVER_INTERFACE_VERSION;
+ goto exit;
+
+ case IOCTL_TF_EXCHANGE:
+ /*
+ * ioctl is asking to perform a message exchange with the Secure
+ * Module
+ */
+
+ /*
+ * Make a local copy of the data from the user application
+ * This routine checks the data is readable
+ *
+ * Get the header first.
+ */
+ if (copy_from_user(&header,
+ (struct tf_command_header *)ioctl_param,
+ sizeof(struct tf_command_header))) {
+ dprintk(KERN_ERR "tf_device_ioctl(%p): "
+ "Cannot access ioctl parameter %p\n",
+ file, (void *) ioctl_param);
+ result = -EFAULT;
+ goto exit;
+ }
+
+ /* size in words of u32 */
+ command_size = header.message_size +
+ sizeof(struct tf_command_header)/sizeof(u32);
+ if (command_size > sizeof(command)/sizeof(u32)) {
+ dprintk(KERN_ERR "tf_device_ioctl(%p): "
+ "Buffer overflow: too many bytes to copy %d\n",
+ file, command_size);
+ result = -EFAULT;
+ goto exit;
+ }
+
+ if (copy_from_user(&command,
+ (union tf_command *)ioctl_param,
+ command_size * sizeof(u32))) {
+ dprintk(KERN_ERR "tf_device_ioctl(%p): "
+ "Cannot access ioctl parameter %p\n",
+ file, (void *) ioctl_param);
+ result = -EFAULT;
+ goto exit;
+ }
+
+ connection = tf_conn_from_file(file);
+ BUG_ON(connection == NULL);
+
+ /*
+ * The answer memory space address is in the operation_id field
+ */
+ user_answer = (void *) command.header.operation_id;
+
+ atomic_inc(&(connection->pending_op_count));
+
+ dprintk(KERN_WARNING "tf_device_ioctl(%p): "
+ "Sending message type 0x%08x\n",
+ file, command.header.message_type);
+
+ switch (command.header.message_type) {
+ case TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION:
+ result = tf_open_client_session(connection,
+ &command, &answer);
+ break;
+
+ case TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION:
+ result = tf_close_client_session(connection,
+ &command, &answer);
+ break;
+
+ case TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY:
+ result = tf_register_shared_memory(connection,
+ &command, &answer);
+ break;
+
+ case TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY:
+ result = tf_release_shared_memory(connection,
+ &command, &answer);
+ break;
+
+ case TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND:
+ result = tf_invoke_client_command(connection,
+ &command, &answer);
+ break;
+
+ case TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND:
+ result = tf_cancel_client_command(connection,
+ &command, &answer);
+ break;
+
+ default:
+ dprintk(KERN_ERR "tf_device_ioctl(%p): "
+ "Incorrect message type (0x%08x)!\n",
+ connection, command.header.message_type);
+ result = -EOPNOTSUPP;
+ break;
+ }
+
+ atomic_dec(&(connection->pending_op_count));
+
+ if (result != 0) {
+ dprintk(KERN_WARNING "tf_device_ioctl(%p): "
+				"Operation returned error code 0x%08x!\n",
+ file, result);
+ goto exit;
+ }
+
+ /*
+ * Copy the answer back to the user space application.
+		 * The driver does not interpret the answer; it only copies
+		 * back to user space the data handed over by the Secure World
+ */
+ answer_size = answer.header.message_size +
+ sizeof(struct tf_answer_header)/sizeof(u32);
+ if (copy_to_user(user_answer,
+ &answer, answer_size * sizeof(u32))) {
+ dprintk(KERN_WARNING "tf_device_ioctl(%p): "
+ "Failed to copy back the full command "
+ "answer to %p\n", file, user_answer);
+ result = -EFAULT;
+ goto exit;
+ }
+
+ /* successful completion */
+ dprintk(KERN_INFO "tf_device_ioctl(%p): Success\n", file);
+ break;
+
+ case IOCTL_TF_GET_DESCRIPTION: {
+ /* ioctl asking for the version information buffer */
+ struct tf_version_information_buffer *pInfoBuffer;
+
+ dprintk(KERN_INFO "IOCTL_TF_GET_DESCRIPTION:(%p, %u, %p)\n",
+ file, ioctl_num, (void *) ioctl_param);
+
+ pInfoBuffer =
+ ((struct tf_version_information_buffer *) ioctl_param);
+
+ dprintk(KERN_INFO "IOCTL_TF_GET_DESCRIPTION1: "
+ "driver_description=\"%64s\"\n", S_VERSION_STRING);
+
+ if (copy_to_user(pInfoBuffer->driver_description,
+ S_VERSION_STRING,
+ strlen(S_VERSION_STRING) + 1)) {
+ dprintk(KERN_ERR "tf_device_ioctl(%p): "
+				"Failed to copy back the driver description "
+ "to %p\n",
+ file, pInfoBuffer->driver_description);
+ result = -EFAULT;
+ goto exit;
+ }
+
+ dprintk(KERN_INFO "IOCTL_TF_GET_DESCRIPTION2: "
+ "secure_world_description=\"%64s\"\n",
+ tf_get_description(&g_tf_dev.sm));
+
+ if (copy_to_user(pInfoBuffer->secure_world_description,
+ tf_get_description(&g_tf_dev.sm),
+ TF_DESCRIPTION_BUFFER_LENGTH)) {
+ dprintk(KERN_WARNING "tf_device_ioctl(%p): "
+ "Failed to copy back the secure world "
+ "description to %p\n",
+ file, pInfoBuffer->secure_world_description);
+ result = -EFAULT;
+ goto exit;
+ }
+ break;
+ }
+
+ default:
+ dprintk(KERN_ERR "tf_device_ioctl(%p): "
+ "Unknown IOCTL code 0x%08x!\n",
+ file, ioctl_num);
+ result = -EOPNOTSUPP;
+ goto exit;
+ }
+
+exit:
+ return result;
+}
+
+/*----------------------------------------------------------------------------*/
+
+static void tf_device_shutdown(void)
+{
+ tf_power_management(&g_tf_dev.sm, TF_POWER_OPERATION_SHUTDOWN);
+}
+
+/*----------------------------------------------------------------------------*/
+
+static int tf_device_suspend(void)
+{
+ dprintk(KERN_INFO "%s\n", __func__);
+ return tf_power_management(&g_tf_dev.sm, TF_POWER_OPERATION_HIBERNATE);
+}
+
+
+/*----------------------------------------------------------------------------*/
+
+static void tf_device_resume(void)
+{
+ tf_power_management(&g_tf_dev.sm, TF_POWER_OPERATION_RESUME);
+}
+
+
+/*----------------------------------------------------------------------------*/
+
+module_init(tf_device_register);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Trusted Logic S.A.");
diff --git a/security/smc/tf_device_mshield.c b/security/smc/tf_device_mshield.c
new file mode 100644
index 0000000..6139ad6
--- /dev/null
+++ b/security/smc/tf_device_mshield.c
@@ -0,0 +1,351 @@
+/**
+ * Copyright (c) 2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <asm/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/page-flags.h>
+#include <linux/pm.h>
+#include <linux/sysdev.h>
+#include <linux/vmalloc.h>
+#include <linux/signal.h>
+#ifdef CONFIG_ANDROID
+#include <linux/device.h>
+#endif
+#include <linux/init.h>
+#include <linux/bootmem.h>
+
+#include "tf_protocol.h"
+#include "tf_defs.h"
+#include "tf_util.h"
+#include "tf_conn.h"
+#include "tf_comm.h"
+#include "tf_zebra.h"
+
+#include "s_version.h"
+
+#define TF_PA_CTRL_START 0x1
+#define TF_PA_CTRL_STOP 0x2
+
+#ifdef CONFIG_ANDROID
+static struct class *tf_ctrl_class;
+#endif
+
+#define TF_DEVICE_CTRL_BASE_NAME "tf_ctrl"
+
+struct tf_pa_ctrl {
+ u32 nPACommand;
+
+ u32 pa_size;
+ u8 *pa_buffer;
+
+ u32 conf_size;
+ u8 *conf_buffer;
+};
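+
+/*
+ * Illustrative sketch only: a userspace loader would fill this structure
+ * and issue IOCTL_TF_PA_CTRL (defined below) on the control node to start
+ * the protected application. 'ctrl_fd', 'pa_data'/'conf_data' and their
+ * lengths are hypothetical names, not part of this driver:
+ *
+ *	struct tf_pa_ctrl ctrl = {
+ *		.nPACommand  = TF_PA_CTRL_START,
+ *		.pa_size     = pa_len,
+ *		.pa_buffer   = pa_data,
+ *		.conf_size   = conf_len,
+ *		.conf_buffer = conf_data,
+ *	};
+ *	ioctl(ctrl_fd, IOCTL_TF_PA_CTRL, &ctrl);
+ */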
+
+static int tf_ctrl_check_omap_type(void)
+{
+ /* No need to do anything on a GP device */
+ switch (omap_type()) {
+ case OMAP2_DEVICE_TYPE_GP:
+ dprintk(KERN_INFO "SMC: Running on a GP device\n");
+ return 0;
+
+ case OMAP2_DEVICE_TYPE_EMU:
+ case OMAP2_DEVICE_TYPE_SEC:
+ /*case OMAP2_DEVICE_TYPE_TEST:*/
+ dprintk(KERN_INFO "SMC: Running on a EMU or HS device\n");
+ return 1;
+
+ default:
+ printk(KERN_ERR "SMC: unknown omap type %x\n", omap_type());
+ return -EFAULT;
+ }
+}
+
+#define IOCTL_TF_PA_CTRL _IOWR('z', 0xFF, struct tf_pa_ctrl)
+
+static long tf_ctrl_device_ioctl(struct file *file, unsigned int ioctl_num,
+ unsigned long ioctl_param)
+{
+ int result = S_SUCCESS;
+ struct tf_pa_ctrl pa_ctrl;
+ u8 *pa_buffer = NULL;
+ u8 *conf_buffer = NULL;
+ struct tf_device *dev = tf_get_device();
+
+ dprintk(KERN_INFO "tf_ctrl_device_ioctl(%p, %u, %p)\n",
+ file, ioctl_num, (void *) ioctl_param);
+
+ mutex_lock(&dev->dev_mutex);
+
+ if (ioctl_num != IOCTL_TF_PA_CTRL) {
+ dprintk(KERN_ERR "tf_ctrl_device_ioctl(%p): "
+ "ioctl number is invalid (%p)\n",
+ file, (void *)ioctl_num);
+
+ result = -EFAULT;
+ goto exit;
+ }
+
+ if ((ioctl_param & 0x3) != 0) {
+ dprintk(KERN_ERR "tf_ctrl_device_ioctl(%p): "
+ "ioctl command message pointer is not word "
+ "aligned (%p)\n",
+ file, (void *)ioctl_param);
+
+ result = -EFAULT;
+ goto exit;
+ }
+
+ if (copy_from_user(&pa_ctrl, (struct tf_pa_ctrl *)ioctl_param,
+ sizeof(struct tf_pa_ctrl))) {
+ dprintk(KERN_ERR "tf_ctrl_device_ioctl(%p): "
+ "cannot access ioctl parameter (%p)\n",
+ file, (void *)ioctl_param);
+
+ result = -EFAULT;
+ goto exit;
+ }
+
+ switch (pa_ctrl.nPACommand) {
+ case TF_PA_CTRL_START:
+ dprintk(KERN_INFO "tf_ctrl_device_ioctl(%p): "
+ "Start the SMC PA (%d bytes) with conf (%d bytes)\n",
+ file, pa_ctrl.pa_size, pa_ctrl.conf_size);
+
+ pa_buffer = (u8 *) internal_kmalloc(pa_ctrl.pa_size,
+ GFP_KERNEL);
+ if (pa_buffer == NULL) {
+ dprintk(KERN_ERR "tf_ctrl_device_ioctl(%p): "
+ "Out of memory for PA buffer\n", file);
+
+ result = -ENOMEM;
+ goto exit;
+ }
+
+ if (copy_from_user(
+ pa_buffer, pa_ctrl.pa_buffer, pa_ctrl.pa_size)) {
+ dprintk(KERN_ERR "tf_ctrl_device_ioctl(%p): "
+ "Cannot access PA buffer (%p)\n",
+ file, (void *) pa_ctrl.pa_buffer);
+
+ internal_kfree(pa_buffer);
+
+ result = -EFAULT;
+ goto exit;
+ }
+
+ if (pa_ctrl.conf_size > 0) {
+ conf_buffer = (u8 *) internal_kmalloc(
+ pa_ctrl.conf_size, GFP_KERNEL);
+ if (conf_buffer == NULL) {
+ internal_kfree(pa_buffer);
+
+ result = -ENOMEM;
+ goto exit;
+ }
+
+ if (copy_from_user(conf_buffer,
+ pa_ctrl.conf_buffer, pa_ctrl.conf_size)) {
+ internal_kfree(pa_buffer);
+ internal_kfree(conf_buffer);
+
+ result = -EFAULT;
+ goto exit;
+ }
+ }
+
+ if (dev->workspace_addr == 0) {
+ result = -ENOMEM;
+ goto exit;
+ }
+
+ result = tf_start(&dev->sm,
+ dev->workspace_addr,
+ dev->workspace_size,
+ pa_buffer,
+ pa_ctrl.pa_size,
+ conf_buffer,
+ pa_ctrl.conf_size);
+ if (result)
+ dprintk(KERN_ERR "SMC: start failed\n");
+ else
+ dprintk(KERN_INFO "SMC: started\n");
+
+ internal_kfree(pa_buffer);
+ internal_kfree(conf_buffer);
+ break;
+
+ case TF_PA_CTRL_STOP:
+ dprintk(KERN_INFO "tf_ctrl_device_ioctl(%p): "
+ "Stop the SMC PA\n", file);
+
+ result = tf_power_management(&dev->sm,
+ TF_POWER_OPERATION_SHUTDOWN);
+ if (result)
+ dprintk(KERN_WARNING "SMC: stop failed [0x%x]\n",
+ result);
+ else
+ dprintk(KERN_INFO "SMC: stopped\n");
+ break;
+
+ default:
+ result = -EOPNOTSUPP;
+ break;
+ }
+
+exit:
+ mutex_unlock(&dev->dev_mutex);
+ return result;
+}
+
+/*----------------------------------------------------------------------------*/
+
+static int tf_ctrl_device_open(struct inode *inode, struct file *file)
+{
+ int error;
+
+ dprintk(KERN_INFO "tf_ctrl_device_open(%u:%u, %p)\n",
+ imajor(inode), iminor(inode), file);
+
+ /* Dummy lseek for non-seekable driver */
+ error = nonseekable_open(inode, file);
+ if (error != 0) {
+ dprintk(KERN_ERR "tf_ctrl_device_open(%p): "
+ "nonseekable_open failed (error %d)!\n",
+ file, error);
+ goto error;
+ }
+
+#ifndef CONFIG_ANDROID
+	/*
+	 * Check file flags. We only authorize O_RDWR access.
+	 */
+	if ((file->f_flags & O_ACCMODE) != O_RDWR) {
+ dprintk(KERN_ERR "tf_ctrl_device_open(%p): "
+ "Invalid access mode %u\n",
+ file, file->f_flags);
+ error = -EACCES;
+ goto error;
+ }
+#endif
+
+ error = tf_ctrl_check_omap_type();
+ if (error <= 0)
+ return error;
+
+ /*
+ * Successful completion.
+ */
+
+ dprintk(KERN_INFO "tf_ctrl_device_open(%p): Success\n", file);
+ return 0;
+
+ /*
+ * Error handling.
+ */
+error:
+ dprintk(KERN_INFO "tf_ctrl_device_open(%p): Failure (error %d)\n",
+ file, error);
+ return error;
+}
+
+static const struct file_operations g_tf_ctrl_device_file_ops = {
+ .owner = THIS_MODULE,
+ .open = tf_ctrl_device_open,
+ .unlocked_ioctl = tf_ctrl_device_ioctl,
+ .llseek = no_llseek,
+};
+
+int __init tf_ctrl_device_register(void)
+{
+ int error;
+ struct tf_device *dev = tf_get_device();
+
+ cdev_init(&dev->cdev_ctrl, &g_tf_ctrl_device_file_ops);
+ dev->cdev_ctrl.owner = THIS_MODULE;
+
+ error = register_chrdev_region(dev->dev_number + 1, 1,
+ TF_DEVICE_CTRL_BASE_NAME);
+ if (error)
+ return error;
+
+ error = cdev_add(&dev->cdev_ctrl,
+ dev->dev_number + 1, 1);
+ if (error) {
+ cdev_del(&(dev->cdev_ctrl));
+ unregister_chrdev_region(dev->dev_number + 1, 1);
+ return error;
+ }
+
+#ifdef CONFIG_ANDROID
+ tf_ctrl_class = class_create(THIS_MODULE, TF_DEVICE_CTRL_BASE_NAME);
+ device_create(tf_ctrl_class, NULL,
+ dev->dev_number + 1,
+ NULL, TF_DEVICE_CTRL_BASE_NAME);
+#endif
+
+ mutex_init(&dev->dev_mutex);
+
+ return error;
+}
+
+static int smc_mem __initdata;
+static int smc_address __initdata;
+
+void __init tf_allocate_workspace(void)
+{
+ struct tf_device *dev = tf_get_device();
+
+ if (tf_ctrl_check_omap_type() <= 0)
+ return;
+
+ dev->workspace_size = smc_mem;
+ if (dev->workspace_size < 3*SZ_1M)
+ dev->workspace_size = 3*SZ_1M;
+
+ if (smc_address == 0)
+#if 0
+ dev->workspace_addr = (u32) __pa(__alloc_bootmem(
+ dev->workspace_size, SZ_1M, __pa(MAX_DMA_ADDRESS)));
+#else
+ dev->workspace_addr = (u32) 0xBFD00000;
+#endif
+ else
+ dev->workspace_addr = smc_address;
+
+	pr_info("SMC: Allocated workspace of 0x%x bytes at 0x%x\n",
+ dev->workspace_size,
+ dev->workspace_addr);
+}
+
+static int __init tf_mem_setup(char *str)
+{
+ smc_mem = memparse(str, &str);
+ if (*str == '@') {
+ str += 1;
+ get_option(&str, &smc_address);
+ }
+ return 0;
+}
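+
+/*
+ * Example (illustrative): booting with "smc_mem=4M@0xBF000000" requests a
+ * 4 MiB workspace at physical address 0xBF000000, while "smc_mem=4M" alone
+ * makes tf_allocate_workspace() fall back to the default address.
+ */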
+
+early_param("smc_mem", tf_mem_setup);
diff --git a/security/smc/tf_dma.c b/security/smc/tf_dma.c
new file mode 100644
index 0000000..a424dbb
--- /dev/null
+++ b/security/smc/tf_dma.c
@@ -0,0 +1,106 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include "tf_defs.h"
+#include "tf_util.h"
+#include "tf_dma.h"
+
+#include <asm/atomic.h>
+
+static atomic_t g_dmaEventFlag = ATOMIC_INIT(0);
+
+/*------------------------------------------------------------------------ */
+/*
+ * Internal functions
+ */
+
+static void tf_dma_callback(int lch, u16 ch_status, void *data)
+{
+ atomic_inc(&g_dmaEventFlag);
+}
+
+/*------------------------------------------------------------------------ */
+/*
+ * Public DMA API
+ */
+
+u32 tf_dma_request(int *lch)
+{
+ int dma_ch_out = 0;
+
+ if (lch == NULL)
+ return PUBLIC_CRYPTO_ERR_BAD_PARAMETERS;
+
+ if (omap_request_dma(0, "SMC Public Crypto",
+ tf_dma_callback, NULL, &dma_ch_out) != 0)
+ return PUBLIC_CRYPTO_ERR_OUT_OF_MEMORY;
+
+ omap_disable_dma_irq(dma_ch_out, OMAP_DMA_DROP_IRQ |
+ OMAP_DMA_BLOCK_IRQ);
+
+ *lch = dma_ch_out;
+
+ return PUBLIC_CRYPTO_OPERATION_SUCCESS;
+}
+
+/*------------------------------------------------------------------------ */
+
+void tf_dma_start(int lch, int interrupt_mask)
+{
+ atomic_set(&g_dmaEventFlag, 0);
+ omap_enable_dma_irq(lch, interrupt_mask);
+ omap_start_dma(lch);
+}
+
+/*------------------------------------------------------------------------ */
+
+void tf_dma_wait(int nr_of_cb)
+{
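+	/* Busy-wait: each DMA interrupt callback increments g_dmaEventFlag */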
+ while (atomic_read(&g_dmaEventFlag) < nr_of_cb)
+ cpu_relax();
+}
+
+/*------------------------------------------------------------------------ */
+/*
+ * Perform common DMA channel setup, factored out of the crypto drivers
+ *
+ * Output: struct omap_dma_channel_params *dma_channel
+ * Inputs: u32 nb_blocks Number of block of the transfer
+ * u32 nb_elements Number of elements of the transfer
+ * u32 dst_start Destination address
+ * u32 src_start Source address
+ * u32 trigger_id Trigger ID
+ */
+void tf_dma_set_channel_common_params(
+ struct omap_dma_channel_params *dma_channel,
+ u32 nb_blocks, u32 nb_elements,
+ u32 dst_start, u32 src_start, u32 trigger_id)
+{
+ dma_channel->data_type = OMAP_DMA_DATA_TYPE_S32;
+ dma_channel->elem_count = nb_elements;
+ dma_channel->frame_count = nb_blocks;
+ dma_channel->src_ei = 0;
+ dma_channel->src_fi = 0;
+ dma_channel->dst_ei = 0;
+ dma_channel->dst_fi = 0;
+ dma_channel->sync_mode = OMAP_DMA_SYNC_FRAME;
+ dma_channel->src_start = src_start;
+ dma_channel->dst_start = dst_start;
+ dma_channel->trigger = trigger_id;
+}
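+
+/*
+ * Usage sketch (illustrative only; the block count, physical addresses and
+ * trigger ID are assumptions, not values fixed by this file):
+ *
+ *	struct omap_dma_channel_params params;
+ *	int lch;
+ *
+ *	if (tf_dma_request(&lch) != PUBLIC_CRYPTO_OPERATION_SUCCESS)
+ *		return;
+ *	tf_dma_set_channel_common_params(&params, nb_blocks,
+ *		DMA_CEN_Elts_per_Frame_AES, dst_phys, src_phys, trigger_id);
+ *	omap_set_dma_params(lch, &params);
+ *	tf_dma_start(lch, OMAP_DMA_BLOCK_IRQ);
+ *	tf_dma_wait(1);
+ *	omap_free_dma(lch);
+ */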
diff --git a/security/smc/tf_dma.h b/security/smc/tf_dma.h
new file mode 100644
index 0000000..3492241
--- /dev/null
+++ b/security/smc/tf_dma.h
@@ -0,0 +1,64 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TF_PUBLIC_DMA_H
+#define __TF_PUBLIC_DMA_H
+
+#include <linux/dma-mapping.h>
+#include <plat/dma.h>
+#include <plat/dma-44xx.h>
+
+#include "tf_crypto.h"
+
+/*-------------------------------------------------------------------------- */
+/*
+ * Public DMA API
+ */
+
+/*
+ * CEN Masks
+ */
+#define DMA_CEN_Elts_per_Frame_AES 4
+#define DMA_CEN_Elts_per_Frame_DES 2
+#define DMA_CEN_Elts_per_Frame_SHA 16
+
+/*
+ * Request a DMA channel
+ */
+u32 tf_dma_request(int *lch);
+
+/**
+ * This function waits for the DMA IRQ.
+ */
+void tf_dma_wait(int nr_of_cb);
+
+/*
+ * This function starts a DMA operation.
+ *
+ * lch DMA channel ID.
+ * interrupt_mask Configures the Channel Interrupt Control Register.
+ */
+void tf_dma_start(int lch, int interrupt_mask);
+
+void tf_dma_set_channel_common_params(
+ struct omap_dma_channel_params *dma_channel,
+ u32 nb_blocks, u32 nb_elements, u32 dst_start,
+ u32 src_start, u32 trigger_id);
+
+#endif /*__TF_PUBLIC_DMA_H */
diff --git a/security/smc/tf_protocol.h b/security/smc/tf_protocol.h
new file mode 100644
index 0000000..d7bbd7d
--- /dev/null
+++ b/security/smc/tf_protocol.h
@@ -0,0 +1,669 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TF_PROTOCOL_H__
+#define __TF_PROTOCOL_H__
+
+/*----------------------------------------------------------------------------
+ *
+ * This header file defines the structure used in the SChannel Protocol.
+ * See your Product Reference Manual for a specification of the SChannel
+ * protocol.
+ *---------------------------------------------------------------------------*/
+
+/*
+ * The driver interface version returned by the version ioctl
+ */
+#define TF_DRIVER_INTERFACE_VERSION 0x04000000
+
+/*
+ * Protocol version handling
+ */
+#define TF_S_PROTOCOL_MAJOR_VERSION (0x06)
+#define GET_PROTOCOL_MAJOR_VERSION(a) (a >> 24)
+#define GET_PROTOCOL_MINOR_VERSION(a) ((a >> 16) & 0xFF)
+
+/*
+ * The S flag of the config_flag_s register.
+ */
+#define TF_CONFIG_FLAG_S (1 << 3)
+
+/*
+ * The TimeSlot field of the sync_serial_n register.
+ */
+#define TF_SYNC_SERIAL_TIMESLOT_N (1)
+
+/*
+ * status_s related defines.
+ */
+#define TF_STATUS_P_MASK (0X00000001)
+#define TF_STATUS_POWER_STATE_SHIFT (3)
+#define TF_STATUS_POWER_STATE_MASK (0x1F << TF_STATUS_POWER_STATE_SHIFT)
+
+/*
+ * Possible power states of the POWER_STATE field of the status_s register
+ */
+#define TF_POWER_MODE_COLD_BOOT (0)
+#define TF_POWER_MODE_WARM_BOOT (1)
+#define TF_POWER_MODE_ACTIVE (3)
+#define TF_POWER_MODE_READY_TO_SHUTDOWN (5)
+#define TF_POWER_MODE_READY_TO_HIBERNATE (7)
+#define TF_POWER_MODE_WAKEUP (8)
+#define TF_POWER_MODE_PANIC (15)
+
+/*
+ * Possible command values for MANAGEMENT commands
+ */
+#define TF_MANAGEMENT_HIBERNATE (1)
+#define TF_MANAGEMENT_SHUTDOWN (2)
+#define TF_MANAGEMENT_PREPARE_FOR_CORE_OFF (3)
+#define TF_MANAGEMENT_RESUME_FROM_CORE_OFF (4)
+
+/*
+ * The capacity of the Normal World message queue, in number of slots.
+ */
+#define TF_N_MESSAGE_QUEUE_CAPACITY (512)
+
+/*
+ * The capacity of the Secure World message answer queue, in number of slots.
+ */
+#define TF_S_ANSWER_QUEUE_CAPACITY (256)
+
+/*
+ * The value of the S-timeout register indicating an infinite timeout.
+ */
+#define TF_S_TIMEOUT_0_INFINITE (0xFFFFFFFF)
+#define TF_S_TIMEOUT_1_INFINITE (0xFFFFFFFF)
+
+/*
+ * The value of the S-timeout register indicating an immediate timeout.
+ */
+#define TF_S_TIMEOUT_0_IMMEDIATE (0x0)
+#define TF_S_TIMEOUT_1_IMMEDIATE (0x0)
+
+/*
+ * Identifies the get protocol version SMC.
+ */
+#define TF_SMC_GET_PROTOCOL_VERSION (0XFFFFFFFB)
+
+/*
+ * Identifies the init SMC.
+ */
+#define TF_SMC_INIT (0XFFFFFFFF)
+
+/*
+ * Identifies the reset irq SMC.
+ */
+#define TF_SMC_RESET_IRQ (0xFFFFFFFE)
+
+/*
+ * Identifies the SET_W3B SMC.
+ */
+#define TF_SMC_WAKE_UP (0xFFFFFFFD)
+
+/*
+ * Identifies the STOP SMC.
+ */
+#define TF_SMC_STOP (0xFFFFFFFC)
+
+/*
+ * Identifies the n-yield SMC.
+ */
+#define TF_SMC_N_YIELD (0X00000003)
+
+
+/* Possible stop commands for SMC_STOP */
+#define SCSTOP_HIBERNATE (0xFFFFFFE1)
+#define SCSTOP_SHUTDOWN (0xFFFFFFE2)
+
+/*
+ * Representation of a UUID.
+ */
+struct tf_uuid {
+ u32 time_low;
+ u16 time_mid;
+ u16 time_hi_and_version;
+ u8 clock_seq_and_node[8];
+};
+
+
+/**
+ * Command parameters.
+ */
+struct tf_command_param_value {
+ u32 a;
+ u32 b;
+};
+
+struct tf_command_param_temp_memref {
+ u32 descriptor; /* data pointer for exchange message.*/
+ u32 size;
+ u32 offset;
+};
+
+struct tf_command_param_memref {
+ u32 block;
+ u32 size;
+ u32 offset;
+};
+
+union tf_command_param {
+ struct tf_command_param_value value;
+ struct tf_command_param_temp_memref temp_memref;
+ struct tf_command_param_memref memref;
+};
+
+/**
+ * Answer parameters.
+ */
+struct tf_answer_param_value {
+ u32 a;
+ u32 b;
+};
+
+struct tf_answer_param_size {
+ u32 _ignored;
+ u32 size;
+};
+
+union tf_answer_param {
+ struct tf_answer_param_size size;
+ struct tf_answer_param_value value;
+};
+
+/*
+ * Descriptor tables capacity
+ */
+#define TF_MAX_W3B_COARSE_PAGES (2)
+#define TF_MAX_COARSE_PAGES (8)
+#define TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT (8)
+#define TF_DESCRIPTOR_TABLE_CAPACITY \
+ (1 << TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT)
+#define TF_DESCRIPTOR_TABLE_CAPACITY_MASK \
+ (TF_DESCRIPTOR_TABLE_CAPACITY - 1)
+/* Shared memories coarse pages can map up to 1MB */
+#define TF_MAX_COARSE_PAGE_MAPPED_SIZE \
+ (PAGE_SIZE * TF_DESCRIPTOR_TABLE_CAPACITY)
+/* Shared memories cannot exceed 8MB */
+#define TF_MAX_SHMEM_SIZE \
+ (TF_MAX_COARSE_PAGE_MAPPED_SIZE << 3)
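+/*
+ * With 4 KiB pages, one coarse page table holds 256 descriptors and so
+ * maps 256 * 4 KiB = 1 MiB; shifting by 3 yields the 8 MiB limit above.
+ */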
+
+/*
+ * Buffer size for version description fields
+ */
+#define TF_DESCRIPTION_BUFFER_LENGTH 64
+
+/*
+ * Shared memory type flags.
+ */
+#define TF_SHMEM_TYPE_READ (0x00000001)
+#define TF_SHMEM_TYPE_WRITE (0x00000002)
+
+/*
+ * Shared mem flags
+ */
+#define TF_SHARED_MEM_FLAG_INPUT 1
+#define TF_SHARED_MEM_FLAG_OUTPUT 2
+#define TF_SHARED_MEM_FLAG_INOUT 3
+
+
+/*
+ * Parameter types
+ */
+#define TF_PARAM_TYPE_NONE 0x0
+#define TF_PARAM_TYPE_VALUE_INPUT 0x1
+#define TF_PARAM_TYPE_VALUE_OUTPUT 0x2
+#define TF_PARAM_TYPE_VALUE_INOUT 0x3
+#define TF_PARAM_TYPE_MEMREF_TEMP_INPUT 0x5
+#define TF_PARAM_TYPE_MEMREF_TEMP_OUTPUT 0x6
+#define TF_PARAM_TYPE_MEMREF_TEMP_INOUT 0x7
+#define TF_PARAM_TYPE_MEMREF_INPUT 0xD
+#define TF_PARAM_TYPE_MEMREF_OUTPUT 0xE
+#define TF_PARAM_TYPE_MEMREF_INOUT 0xF
+
+#define TF_PARAM_TYPE_MEMREF_FLAG 0x4
+#define TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG 0x8
+
+
+#define TF_MAKE_PARAM_TYPES(t0, t1, t2, t3) \
+ ((t0) | ((t1) << 4) | ((t2) << 8) | ((t3) << 12))
+#define TF_GET_PARAM_TYPE(t, i) (((t) >> (4 * i)) & 0xF)
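+
+/*
+ * For example, TF_MAKE_PARAM_TYPES(TF_PARAM_TYPE_VALUE_INPUT,
+ * TF_PARAM_TYPE_MEMREF_TEMP_OUTPUT, TF_PARAM_TYPE_NONE, TF_PARAM_TYPE_NONE)
+ * packs to 0x0061, and TF_GET_PARAM_TYPE(0x0061, 1) recovers
+ * TF_PARAM_TYPE_MEMREF_TEMP_OUTPUT (0x6).
+ */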
+
+/*
+ * Login types.
+ */
+#define TF_LOGIN_PUBLIC 0x00000000
+#define TF_LOGIN_USER 0x00000001
+#define TF_LOGIN_GROUP 0x00000002
+#define TF_LOGIN_APPLICATION 0x00000004
+#define TF_LOGIN_APPLICATION_USER 0x00000005
+#define TF_LOGIN_APPLICATION_GROUP 0x00000006
+#define TF_LOGIN_AUTHENTICATION 0x80000000
+#define TF_LOGIN_PRIVILEGED 0x80000002
+
+/* Login variants */
+
+#define TF_LOGIN_VARIANT(main_type, os, variant) \
+ ((main_type) | (1 << 27) | ((os) << 16) | ((variant) << 8))
+
+#define TF_LOGIN_GET_MAIN_TYPE(type) \
+ ((type) & ~TF_LOGIN_VARIANT(0, 0xFF, 0xFF))
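+
+/*
+ * For example, TF_LOGIN_USER_LINUX_EUID below expands to
+ * 0x00000001 | (1 << 27) | (0x01 << 16) | (0x01 << 8) = 0x08010101,
+ * and TF_LOGIN_GET_MAIN_TYPE() masks it back down to TF_LOGIN_USER.
+ */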
+
+#define TF_LOGIN_OS_ANY 0x00
+#define TF_LOGIN_OS_LINUX 0x01
+#define TF_LOGIN_OS_ANDROID 0x04
+
+/* OS-independent variants */
+#define TF_LOGIN_USER_NONE \
+ TF_LOGIN_VARIANT(TF_LOGIN_USER, TF_LOGIN_OS_ANY, 0xFF)
+#define TF_LOGIN_GROUP_NONE \
+ TF_LOGIN_VARIANT(TF_LOGIN_GROUP, TF_LOGIN_OS_ANY, 0xFF)
+#define TF_LOGIN_APPLICATION_USER_NONE \
+ TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_USER, TF_LOGIN_OS_ANY, 0xFF)
+#define TF_LOGIN_AUTHENTICATION_BINARY_SHA1_HASH \
+ TF_LOGIN_VARIANT(TF_LOGIN_AUTHENTICATION, TF_LOGIN_OS_ANY, 0x01)
+#define TF_LOGIN_PRIVILEGED_KERNEL \
+ TF_LOGIN_VARIANT(TF_LOGIN_PRIVILEGED, TF_LOGIN_OS_ANY, 0x01)
+
+/* Linux variants */
+#define TF_LOGIN_USER_LINUX_EUID \
+ TF_LOGIN_VARIANT(TF_LOGIN_USER, TF_LOGIN_OS_LINUX, 0x01)
+#define TF_LOGIN_GROUP_LINUX_GID \
+ TF_LOGIN_VARIANT(TF_LOGIN_GROUP, TF_LOGIN_OS_LINUX, 0x01)
+#define TF_LOGIN_APPLICATION_LINUX_PATH_SHA1_HASH \
+ TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION, TF_LOGIN_OS_LINUX, 0x01)
+#define TF_LOGIN_APPLICATION_USER_LINUX_PATH_EUID_SHA1_HASH \
+ TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_USER, TF_LOGIN_OS_LINUX, 0x01)
+#define TF_LOGIN_APPLICATION_GROUP_LINUX_PATH_GID_SHA1_HASH \
+ TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_GROUP, TF_LOGIN_OS_LINUX, 0x01)
+
+/* Android variants */
+#define TF_LOGIN_USER_ANDROID_EUID \
+ TF_LOGIN_VARIANT(TF_LOGIN_USER, TF_LOGIN_OS_ANDROID, 0x01)
+#define TF_LOGIN_GROUP_ANDROID_GID \
+ TF_LOGIN_VARIANT(TF_LOGIN_GROUP, TF_LOGIN_OS_ANDROID, 0x01)
+#define TF_LOGIN_APPLICATION_ANDROID_UID \
+ TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION, TF_LOGIN_OS_ANDROID, 0x01)
+#define TF_LOGIN_APPLICATION_USER_ANDROID_UID_EUID \
+ TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_USER, TF_LOGIN_OS_ANDROID, \
+ 0x01)
+#define TF_LOGIN_APPLICATION_GROUP_ANDROID_UID_GID \
+ TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_GROUP, TF_LOGIN_OS_ANDROID, \
+ 0x01)
+
+/*
+ * return origins
+ */
+#define TF_ORIGIN_COMMS 2
+#define TF_ORIGIN_TEE 3
+#define TF_ORIGIN_TRUSTED_APP 4
+/*
+ * The message types.
+ */
+#define TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT 0x02
+#define TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT 0xFD
+#define TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY 0xF7
+#define TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY 0xF9
+#define TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION 0xF0
+#define TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION 0xF2
+#define TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND 0xF5
+#define TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND 0xF4
+#define TF_MESSAGE_TYPE_MANAGEMENT 0xFE
+
+
+/*
+ * The SChannel error codes.
+ */
+#define S_SUCCESS 0x00000000
+#define S_ERROR_OUT_OF_MEMORY 0xFFFF000C
+
+
+struct tf_command_header {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info;
+ u32 operation_id;
+};
+
+struct tf_answer_header {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info;
+ u32 operation_id;
+ u32 error_code;
+};
+
+/*
+ * CREATE_DEVICE_CONTEXT command message.
+ */
+struct tf_command_create_device_context {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ u32 operation_id;
+ u32 device_context_id;
+};
+
+/*
+ * CREATE_DEVICE_CONTEXT answer message.
+ */
+struct tf_answer_create_device_context {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 error_code;
+ /* an opaque Normal World identifier for the device context */
+ u32 device_context;
+};
+
+/*
+ * DESTROY_DEVICE_CONTEXT command message.
+ */
+struct tf_command_destroy_device_context {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ u32 operation_id;
+ u32 device_context;
+};
+
+/*
+ * DESTROY_DEVICE_CONTEXT answer message.
+ */
+struct tf_answer_destroy_device_context {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 error_code;
+ u32 device_context_id;
+};
+
+/*
+ * OPEN_CLIENT_SESSION command message.
+ */
+struct tf_command_open_client_session {
+ u8 message_size;
+ u8 message_type;
+ u16 param_types;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 device_context;
+ u32 cancellation_id;
+ u64 timeout;
+ struct tf_uuid destination_uuid;
+ union tf_command_param params[4];
+ u32 login_type;
+ /*
+ * Size = 0 for public, [16] for group identification, [20] for
+ * authentication
+ */
+ u8 login_data[20];
+};
+
+/*
+ * OPEN_CLIENT_SESSION answer message.
+ */
+struct tf_answer_open_client_session {
+ u8 message_size;
+ u8 message_type;
+ u8 error_origin;
+ u8 __reserved;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 error_code;
+ u32 client_session;
+ union tf_answer_param answers[4];
+};
+
+/*
+ * CLOSE_CLIENT_SESSION command message.
+ */
+struct tf_command_close_client_session {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 device_context;
+ u32 client_session;
+};
+
+/*
+ * CLOSE_CLIENT_SESSION answer message.
+ */
+struct tf_answer_close_client_session {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 error_code;
+};
+
+
+/*
+ * REGISTER_SHARED_MEMORY command message
+ */
+struct tf_command_register_shared_memory {
+ u8 message_size;
+ u8 message_type;
+ u16 memory_flags;
+ u32 operation_id;
+ u32 device_context;
+ u32 block_id;
+ u32 shared_mem_size;
+ u32 shared_mem_start_offset;
+ u32 shared_mem_descriptors[TF_MAX_COARSE_PAGES];
+};
+
+/*
+ * REGISTER_SHARED_MEMORY answer message.
+ */
+struct tf_answer_register_shared_memory {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 error_code;
+ u32 block;
+};
+
+/*
+ * RELEASE_SHARED_MEMORY command message.
+ */
+struct tf_command_release_shared_memory {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 device_context;
+ u32 block;
+};
+
+/*
+ * RELEASE_SHARED_MEMORY answer message.
+ */
+struct tf_answer_release_shared_memory {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ u32 operation_id;
+ u32 error_code;
+ u32 block_id;
+};
+
+/*
+ * INVOKE_CLIENT_COMMAND command message.
+ */
+struct tf_command_invoke_client_command {
+ u8 message_size;
+ u8 message_type;
+ u16 param_types;
+ u32 operation_id;
+ u32 device_context;
+ u32 client_session;
+ u64 timeout;
+ u32 cancellation_id;
+ u32 client_command_identifier;
+ union tf_command_param params[4];
+};
+
+/*
+ * INVOKE_CLIENT_COMMAND command answer.
+ */
+		dprintk(KERN_INFO "SMC: Running on an EMU or HS device\n");
+ u8 message_size;
+ u8 message_type;
+ u8 error_origin;
+ u8 __reserved;
+ u32 operation_id;
+ u32 error_code;
+ union tf_answer_param answers[4];
+};
+
+/*
+ * CANCEL_CLIENT_OPERATION command message.
+ */
+struct tf_command_cancel_client_operation {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ /* an opaque Normal World identifier for the operation */
+ u32 operation_id;
+ u32 device_context;
+ u32 client_session;
+ u32 cancellation_id;
+};
+
+struct tf_answer_cancel_client_operation {
+ u8 message_size;
+ u8 message_type;
+ u16 message_info_rfu;
+ u32 operation_id;
+ u32 error_code;
+};
+
+/*
+ * MANAGEMENT command message.
+ */
+struct tf_command_management {
+ u8 message_size;
+ u8 message_type;
+ u16 command;
+ u32 operation_id;
+ u32 w3b_size;
+ u32 w3b_start_offset;
+ u32 shared_mem_descriptors[1];
+};
+
+/*
+ * POWER_MANAGEMENT answer message.
+ * The message does not provide message specific parameters.
+ * Therefore no need to define a specific answer structure
+ */
+
+/*
+ * Structure for L2 messages
+ */
+union tf_command {
+ struct tf_command_header header;
+ struct tf_command_create_device_context create_device_context;
+ struct tf_command_destroy_device_context destroy_device_context;
+ struct tf_command_open_client_session open_client_session;
+ struct tf_command_close_client_session close_client_session;
+ struct tf_command_register_shared_memory register_shared_memory;
+ struct tf_command_release_shared_memory release_shared_memory;
+ struct tf_command_invoke_client_command invoke_client_command;
+ struct tf_command_cancel_client_operation cancel_client_operation;
+ struct tf_command_management management;
+};
+
+/*
+ * Structure for any L2 answer
+ */
+
+union tf_answer {
+ struct tf_answer_header header;
+ struct tf_answer_create_device_context create_device_context;
+ struct tf_answer_open_client_session open_client_session;
+ struct tf_answer_close_client_session close_client_session;
+ struct tf_answer_register_shared_memory register_shared_memory;
+ struct tf_answer_release_shared_memory release_shared_memory;
+ struct tf_answer_invoke_client_command invoke_client_command;
+ struct tf_answer_destroy_device_context destroy_device_context;
+ struct tf_answer_cancel_client_operation cancel_client_operation;
+};
+
+/* Structure of the Communication Buffer */
+struct tf_l1_shared_buffer {
+ u32 config_flag_s;
+ u32 w3b_size_max_s;
+ u32 reserved0;
+ u32 w3b_size_current_s;
+ u8 reserved1[48];
+ u8 version_description[TF_DESCRIPTION_BUFFER_LENGTH];
+ u32 status_s;
+ u32 reserved2;
+ u32 sync_serial_n;
+ u32 sync_serial_s;
+ u64 time_n[2];
+ u64 timeout_s[2];
+ u32 first_command;
+ u32 first_free_command;
+ u32 first_answer;
+ u32 first_free_answer;
+ u32 w3b_descriptors[128];
+ #ifdef CONFIG_TF_ZEBRA
+ u8 rpc_trace_buffer[140];
+ u8 rpc_cus_buffer[180];
+ #else
+ u8 reserved3[320];
+ #endif
+ u32 command_queue[TF_N_MESSAGE_QUEUE_CAPACITY];
+ u32 answer_queue[TF_S_ANSWER_QUEUE_CAPACITY];
+};
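+/*
+ * Note: the fields above total exactly 4096 bytes, so the whole
+ * communication buffer fits in one 4 KiB page.
+ */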
+
+
+/*
+ * tf_version_information_buffer structure description
+ * Description of the sVersionBuffer handed over from user space to kernel space
+ * This field is filled by the driver during a CREATE_DEVICE_CONTEXT ioctl
+ * and handed back to user space
+ */
+struct tf_version_information_buffer {
+ u8 driver_description[65];
+ u8 secure_world_description[65];
+};
+
+
+/* The IOCTLs the driver supports */
+#include <linux/ioctl.h>
+
+#define IOCTL_TF_GET_VERSION _IO('z', 0)
+#define IOCTL_TF_EXCHANGE _IOWR('z', 1, union tf_command)
+#define IOCTL_TF_GET_DESCRIPTION _IOR('z', 2, \
+ struct tf_version_information_buffer)
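+
+/*
+ * Userspace usage sketch (illustrative only; the device node path is an
+ * assumption, the actual name is defined by the driver, not this header):
+ *
+ *	#include <fcntl.h>
+ *	#include <stdio.h>
+ *	#include <sys/ioctl.h>
+ *	#include "tf_protocol.h"
+ *
+ *	int fd = open("/dev/tf_driver", O_RDWR);
+ *	struct tf_version_information_buffer info;
+ *
+ *	if (fd >= 0 && ioctl(fd, IOCTL_TF_GET_DESCRIPTION, &info) == 0)
+ *		printf("driver: %s\n", info.driver_description);
+ */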
+
+#endif /* !defined(__TF_PROTOCOL_H__) */
diff --git a/security/smc/tf_util.c b/security/smc/tf_util.c
new file mode 100644
index 0000000..ec9941b
--- /dev/null
+++ b/security/smc/tf_util.c
@@ -0,0 +1,1145 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/mman.h>
+#include "tf_util.h"
+
+/*----------------------------------------------------------------------------
+ * Debug printing routines
+ *----------------------------------------------------------------------------*/
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+
+void address_cache_property(unsigned long va)
+{
+ unsigned long pa;
+ unsigned long inner;
+ unsigned long outer;
+
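+	/*
+	 * ATS1CPR: translate 'va' (stage 1, privileged read), then read the
+	 * resulting physical address and attributes back from the PAR.
+	 */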
+ asm volatile ("mcr p15, 0, %0, c7, c8, 0" : : "r" (va));
+ asm volatile ("mrc p15, 0, %0, c7, c4, 0" : "=r" (pa));
+
+ dprintk(KERN_INFO "VA:%x, PA:%x\n",
+ (unsigned int) va,
+ (unsigned int) pa);
+
+ if (pa & 1) {
+ dprintk(KERN_INFO "Prop Error\n");
+ return;
+ }
+
+ outer = (pa >> 2) & 3;
+ dprintk(KERN_INFO "\touter : %x", (unsigned int) outer);
+
+ switch (outer) {
+ case 3:
+ dprintk(KERN_INFO "Write-Back, no Write-Allocate\n");
+ break;
+ case 2:
+ dprintk(KERN_INFO "Write-Through, no Write-Allocate.\n");
+ break;
+ case 1:
+ dprintk(KERN_INFO "Write-Back, Write-Allocate.\n");
+ break;
+ case 0:
+ dprintk(KERN_INFO "Non-cacheable.\n");
+ break;
+ }
+
+ inner = (pa >> 4) & 7;
+ dprintk(KERN_INFO "\tinner : %x", (unsigned int)inner);
+
+ switch (inner) {
+ case 7:
+ dprintk(KERN_INFO "Write-Back, no Write-Allocate\n");
+ break;
+ case 6:
+ dprintk(KERN_INFO "Write-Through.\n");
+ break;
+ case 5:
+ dprintk(KERN_INFO "Write-Back, Write-Allocate.\n");
+ break;
+ case 3:
+ dprintk(KERN_INFO "Device.\n");
+ break;
+ case 1:
+ dprintk(KERN_INFO "Strongly-ordered.\n");
+ break;
+ case 0:
+ dprintk(KERN_INFO "Non-cacheable.\n");
+ break;
+ }
+
+ if (pa & 0x00000002)
+ dprintk(KERN_INFO "SuperSection.\n");
+ if (pa & 0x00000080)
+ dprintk(KERN_INFO "Memory is shareable.\n");
+ else
+ dprintk(KERN_INFO "Memory is non-shareable.\n");
+
+ if (pa & 0x00000200)
+ dprintk(KERN_INFO "Non-secure.\n");
+}
+
+#ifdef CONFIG_BENCH_SECURE_CYCLE
+
+#define LOOP_SIZE (100000)
+
+void run_bogo_mips(void)
+{
+ uint32_t cycles;
+ void *address = &run_bogo_mips;
+
+ dprintk(KERN_INFO "BogoMIPS:\n");
+
+ setup_counters();
+ cycles = run_code_speed(LOOP_SIZE);
+ dprintk(KERN_INFO "%u cycles with code access\n", cycles);
+ cycles = run_data_speed(LOOP_SIZE, (unsigned long)address);
+ dprintk(KERN_INFO "%u cycles to access %x\n", cycles,
+ (unsigned int) address);
+}
+
+#endif /* CONFIG_BENCH_SECURE_CYCLE */
+
+/*
+ * Dump the L1 shared buffer.
+ */
+void tf_dump_l1_shared_buffer(struct tf_l1_shared_buffer *buffer)
+{
+ dprintk(KERN_INFO
+ "buffer@%p:\n"
+ " config_flag_s=%08X\n"
+ " version_description=%64s\n"
+ " status_s=%08X\n"
+ " sync_serial_n=%08X\n"
+ " sync_serial_s=%08X\n"
+ " time_n[0]=%016llX\n"
+ " time_n[1]=%016llX\n"
+ " timeout_s[0]=%016llX\n"
+ " timeout_s[1]=%016llX\n"
+ " first_command=%08X\n"
+ " first_free_command=%08X\n"
+ " first_answer=%08X\n"
+ " first_free_answer=%08X\n\n",
+ buffer,
+ buffer->config_flag_s,
+ buffer->version_description,
+ buffer->status_s,
+ buffer->sync_serial_n,
+ buffer->sync_serial_s,
+ buffer->time_n[0],
+ buffer->time_n[1],
+ buffer->timeout_s[0],
+ buffer->timeout_s[1],
+ buffer->first_command,
+ buffer->first_free_command,
+ buffer->first_answer,
+ buffer->first_free_answer);
+}
+
+
+/*
+ * Dump the specified SChannel message using dprintk.
+ */
+void tf_dump_command(union tf_command *command)
+{
+ u32 i;
+
+ dprintk(KERN_INFO "message@%p:\n", command);
+
+ switch (command->header.message_type) {
+ case TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT\n"
+ " operation_id = 0x%08X\n"
+ " device_context_id = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->header.operation_id,
+ command->create_device_context.device_context_id
+ );
+ break;
+
+ case TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT\n"
+ " operation_id = 0x%08X\n"
+ " device_context = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->header.operation_id,
+ command->destroy_device_context.device_context);
+ break;
+
+ case TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION\n"
+ " param_types = 0x%04X\n"
+ " operation_id = 0x%08X\n"
+ " device_context = 0x%08X\n"
+ " cancellation_id = 0x%08X\n"
+ " timeout = 0x%016llX\n"
+ " destination_uuid = "
+ "%08X-%04X-%04X-%02X%02X-"
+ "%02X%02X%02X%02X%02X%02X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->open_client_session.param_types,
+ command->header.operation_id,
+ command->open_client_session.device_context,
+ command->open_client_session.cancellation_id,
+ command->open_client_session.timeout,
+ command->open_client_session.destination_uuid.
+ time_low,
+ command->open_client_session.destination_uuid.
+ time_mid,
+ command->open_client_session.destination_uuid.
+ time_hi_and_version,
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[0],
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[1],
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[2],
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[3],
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[4],
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[5],
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[6],
+ command->open_client_session.destination_uuid.
+ clock_seq_and_node[7]
+ );
+
+ for (i = 0; i < 4; i++) {
+ uint32_t *param = (uint32_t *) &command->
+ open_client_session.params[i];
+ dprintk(KERN_INFO " params[%d] = "
+ "0x%08X:0x%08X:0x%08X\n",
+ i, param[0], param[1], param[2]);
+ }
+
+ switch (TF_LOGIN_GET_MAIN_TYPE(
+ command->open_client_session.login_type)) {
+ case TF_LOGIN_PUBLIC:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_PUBLIC\n");
+ break;
+ case TF_LOGIN_USER:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_USER\n");
+ break;
+ case TF_LOGIN_GROUP:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_GROUP\n");
+ break;
+ case TF_LOGIN_APPLICATION:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_APPLICATION\n");
+ break;
+ case TF_LOGIN_APPLICATION_USER:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_APPLICATION_USER\n");
+ break;
+ case TF_LOGIN_APPLICATION_GROUP:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_APPLICATION_GROUP\n");
+ break;
+ case TF_LOGIN_AUTHENTICATION:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_AUTHENTICATION\n");
+ break;
+ case TF_LOGIN_PRIVILEGED:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_PRIVILEGED\n");
+ break;
+ case TF_LOGIN_PRIVILEGED_KERNEL:
+ dprintk(
+ KERN_INFO " login_type = "
+ "TF_LOGIN_PRIVILEGED_KERNEL\n");
+ break;
+ default:
+ dprintk(
+ KERN_ERR " login_type = "
+ "0x%08X (Unknown login type)\n",
+ command->open_client_session.login_type);
+ break;
+ }
+
+ dprintk(
+ KERN_INFO " login_data = ");
+ for (i = 0; i < 20; i++)
+ dprintk(
+ KERN_INFO "%d",
+ command->open_client_session.
+ login_data[i]);
+ dprintk("\n");
+ break;
+
+ case TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION\n"
+ " operation_id = 0x%08X\n"
+ " device_context = 0x%08X\n"
+ " client_session = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->header.operation_id,
+ command->close_client_session.device_context,
+ command->close_client_session.client_session
+ );
+ break;
+
+ case TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY\n"
+ " memory_flags = 0x%04X\n"
+ " operation_id = 0x%08X\n"
+ " device_context = 0x%08X\n"
+ " block_id = 0x%08X\n"
+ " shared_mem_size = 0x%08X\n"
+ " shared_mem_start_offset = 0x%08X\n"
+ " shared_mem_descriptors[0] = 0x%08X\n"
+ " shared_mem_descriptors[1] = 0x%08X\n"
+ " shared_mem_descriptors[2] = 0x%08X\n"
+ " shared_mem_descriptors[3] = 0x%08X\n"
+ " shared_mem_descriptors[4] = 0x%08X\n"
+ " shared_mem_descriptors[5] = 0x%08X\n"
+ " shared_mem_descriptors[6] = 0x%08X\n"
+ " shared_mem_descriptors[7] = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->register_shared_memory.memory_flags,
+ command->header.operation_id,
+ command->register_shared_memory.device_context,
+ command->register_shared_memory.block_id,
+ command->register_shared_memory.shared_mem_size,
+ command->register_shared_memory.
+ shared_mem_start_offset,
+ command->register_shared_memory.
+ shared_mem_descriptors[0],
+ command->register_shared_memory.
+ shared_mem_descriptors[1],
+ command->register_shared_memory.
+ shared_mem_descriptors[2],
+ command->register_shared_memory.
+ shared_mem_descriptors[3],
+ command->register_shared_memory.
+ shared_mem_descriptors[4],
+ command->register_shared_memory.
+ shared_mem_descriptors[5],
+ command->register_shared_memory.
+ shared_mem_descriptors[6],
+ command->register_shared_memory.
+ shared_mem_descriptors[7]);
+ break;
+
+ case TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY\n"
+ " operation_id = 0x%08X\n"
+ " device_context = 0x%08X\n"
+ " block = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->header.operation_id,
+ command->release_shared_memory.device_context,
+ command->release_shared_memory.block);
+ break;
+
+ case TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND\n"
+ " param_types = 0x%04X\n"
+ " operation_id = 0x%08X\n"
+ " device_context = 0x%08X\n"
+ " client_session = 0x%08X\n"
+ " timeout = 0x%016llX\n"
+ " cancellation_id = 0x%08X\n"
+ " client_command_identifier = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->invoke_client_command.param_types,
+ command->header.operation_id,
+ command->invoke_client_command.device_context,
+ command->invoke_client_command.client_session,
+ command->invoke_client_command.timeout,
+ command->invoke_client_command.cancellation_id,
+ command->invoke_client_command.
+ client_command_identifier
+ );
+
+ for (i = 0; i < 4; i++) {
+ uint32_t *param = (uint32_t *) &command->
+ open_client_session.params[i];
+ dprintk(KERN_INFO " params[%d] = "
+ "0x%08X:0x%08X:0x%08X\n", i,
+ param[0], param[1], param[2]);
+ }
+ break;
+
+ case TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND\n"
+ " operation_id = 0x%08X\n"
+ " device_context = 0x%08X\n"
+ " client_session = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->header.operation_id,
+ command->cancel_client_operation.device_context,
+ command->cancel_client_operation.client_session);
+ break;
+
+ case TF_MESSAGE_TYPE_MANAGEMENT:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_MANAGEMENT\n"
+ " operation_id = 0x%08X\n"
+ " command = 0x%08X\n"
+ " w3b_size = 0x%08X\n"
+ " w3b_start_offset = 0x%08X\n",
+ command->header.message_size,
+ command->header.message_type,
+ command->header.operation_id,
+ command->management.command,
+ command->management.w3b_size,
+ command->management.w3b_start_offset);
+ break;
+
+ default:
+ dprintk(
+ KERN_ERR " message_type = 0x%08X "
+ "(Unknown message type)\n",
+ command->header.message_type);
+ break;
+ }
+}
+
+
+/*
+ * Dump the specified SChannel answer using dprintk.
+ */
+void tf_dump_answer(union tf_answer *answer)
+{
+ u32 i;
+ dprintk(
+ KERN_INFO "answer@%p:\n",
+ answer);
+
+ switch (answer->header.message_type) {
+ case TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "tf_answer_create_device_context\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n"
+ " device_context = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->header.operation_id,
+ answer->create_device_context.error_code,
+ answer->create_device_context.device_context);
+ break;
+
+ case TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "ANSWER_DESTROY_DEVICE_CONTEXT\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n"
+ " device_context_id = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->header.operation_id,
+ answer->destroy_device_context.error_code,
+ answer->destroy_device_context.device_context_id);
+ break;
+
+
+ case TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "tf_answer_open_client_session\n"
+ " error_origin = 0x%02X\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n"
+ " client_session = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->open_client_session.error_origin,
+ answer->header.operation_id,
+ answer->open_client_session.error_code,
+ answer->open_client_session.client_session);
+ for (i = 0; i < 4; i++) {
+ dprintk(KERN_INFO " answers[%d]=0x%08X:0x%08X\n",
+ i,
+ answer->open_client_session.answers[i].
+ value.a,
+ answer->open_client_session.answers[i].
+ value.b);
+ }
+ break;
+
+ case TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "ANSWER_CLOSE_CLIENT_SESSION\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->header.operation_id,
+ answer->close_client_session.error_code);
+ break;
+
+ case TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "tf_answer_register_shared_memory\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n"
+ " block = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->header.operation_id,
+ answer->register_shared_memory.error_code,
+ answer->register_shared_memory.block);
+ break;
+
+ case TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "ANSWER_RELEASE_SHARED_MEMORY\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n"
+ " block_id = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->header.operation_id,
+ answer->release_shared_memory.error_code,
+ answer->release_shared_memory.block_id);
+ break;
+
+ case TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "tf_answer_invoke_client_command\n"
+ " error_origin = 0x%02X\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->invoke_client_command.error_origin,
+ answer->header.operation_id,
+ answer->invoke_client_command.error_code
+ );
+ for (i = 0; i < 4; i++) {
+ dprintk(KERN_INFO " answers[%d]=0x%08X:0x%08X\n",
+ i,
+ answer->invoke_client_command.answers[i].
+ value.a,
+ answer->invoke_client_command.answers[i].
+ value.b);
+ }
+ break;
+
+ case TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_ANSWER_CANCEL_CLIENT_COMMAND\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->header.operation_id,
+ answer->cancel_client_operation.error_code);
+ break;
+
+ case TF_MESSAGE_TYPE_MANAGEMENT:
+ dprintk(KERN_INFO
+ " message_size = 0x%02X\n"
+ " message_type = 0x%02X "
+ "TF_MESSAGE_TYPE_MANAGEMENT\n"
+ " operation_id = 0x%08X\n"
+ " error_code = 0x%08X\n",
+ answer->header.message_size,
+ answer->header.message_type,
+ answer->header.operation_id,
+ answer->header.error_code);
+ break;
+
+ default:
+ dprintk(
+ KERN_ERR " message_type = 0x%02X "
+ "(Unknown message type)\n",
+ answer->header.message_type);
+ break;
+
+ }
+}
+
+#endif /* defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */
+
+/*----------------------------------------------------------------------------
+ * SHA-1 implementation
+ * This is taken from the Linux kernel source crypto/sha1.c
+ *----------------------------------------------------------------------------*/
+
+struct sha1_ctx {
+ u64 count;
+ u32 state[5];
+ u8 buffer[64];
+};
+
+static inline u32 rol(u32 value, u32 bits)
+{
+ return ((value) << (bits)) | ((value) >> (32 - (bits)));
+}
+
+/* blk0() and blk() perform the initial expand. */
+/* I got the idea of expanding during the round function from SSLeay */
+#define blk0(i) block32[i]
+
+#define blk(i) (block32[i & 15] = rol( \
+ block32[(i + 13) & 15] ^ block32[(i + 8) & 15] ^ \
+ block32[(i + 2) & 15] ^ block32[i & 15], 1))
+
+/* (R0+R1), R2, R3, R4 are the different operations used in SHA1 */
+#define R0(v, w, x, y, z, i) do { \
+ z += ((w & (x ^ y)) ^ y) + blk0(i) + 0x5A827999 + rol(v, 5); \
+ w = rol(w, 30); } while (0)
+
+#define R1(v, w, x, y, z, i) do { \
+ z += ((w & (x ^ y)) ^ y) + blk(i) + 0x5A827999 + rol(v, 5); \
+ w = rol(w, 30); } while (0)
+
+#define R2(v, w, x, y, z, i) do { \
+ z += (w ^ x ^ y) + blk(i) + 0x6ED9EBA1 + rol(v, 5); \
+ w = rol(w, 30); } while (0)
+
+#define R3(v, w, x, y, z, i) do { \
+ z += (((w | x) & y) | (w & x)) + blk(i) + 0x8F1BBCDC + rol(v, 5); \
+ w = rol(w, 30); } while (0)
+
+#define R4(v, w, x, y, z, i) do { \
+ z += (w ^ x ^ y) + blk(i) + 0xCA62C1D6 + rol(v, 5); \
+ w = rol(w, 30); } while (0)
+
+
+/* Hash a single 512-bit block. This is the core of the algorithm. */
+static void sha1_transform(u32 *state, const u8 *in)
+{
+ u32 a, b, c, d, e;
+ u32 block32[16];
+
+ /* convert/copy data to workspace */
+ for (a = 0; a < sizeof(block32)/sizeof(u32); a++)
+ block32[a] = ((u32) in[4 * a]) << 24 |
+ ((u32) in[4 * a + 1]) << 16 |
+ ((u32) in[4 * a + 2]) << 8 |
+ ((u32) in[4 * a + 3]);
+
+ /* Copy context->state[] to working vars */
+ a = state[0];
+ b = state[1];
+ c = state[2];
+ d = state[3];
+ e = state[4];
+
+ /* 4 rounds of 20 operations each. Loop unrolled. */
+ R0(a, b, c, d, e, 0); R0(e, a, b, c, d, 1);
+ R0(d, e, a, b, c, 2); R0(c, d, e, a, b, 3);
+ R0(b, c, d, e, a, 4); R0(a, b, c, d, e, 5);
+ R0(e, a, b, c, d, 6); R0(d, e, a, b, c, 7);
+ R0(c, d, e, a, b, 8); R0(b, c, d, e, a, 9);
+ R0(a, b, c, d, e, 10); R0(e, a, b, c, d, 11);
+ R0(d, e, a, b, c, 12); R0(c, d, e, a, b, 13);
+ R0(b, c, d, e, a, 14); R0(a, b, c, d, e, 15);
+
+ R1(e, a, b, c, d, 16); R1(d, e, a, b, c, 17);
+ R1(c, d, e, a, b, 18); R1(b, c, d, e, a, 19);
+
+ R2(a, b, c, d, e, 20); R2(e, a, b, c, d, 21);
+ R2(d, e, a, b, c, 22); R2(c, d, e, a, b, 23);
+ R2(b, c, d, e, a, 24); R2(a, b, c, d, e, 25);
+ R2(e, a, b, c, d, 26); R2(d, e, a, b, c, 27);
+ R2(c, d, e, a, b, 28); R2(b, c, d, e, a, 29);
+ R2(a, b, c, d, e, 30); R2(e, a, b, c, d, 31);
+ R2(d, e, a, b, c, 32); R2(c, d, e, a, b, 33);
+ R2(b, c, d, e, a, 34); R2(a, b, c, d, e, 35);
+ R2(e, a, b, c, d, 36); R2(d, e, a, b, c, 37);
+ R2(c, d, e, a, b, 38); R2(b, c, d, e, a, 39);
+
+ R3(a, b, c, d, e, 40); R3(e, a, b, c, d, 41);
+ R3(d, e, a, b, c, 42); R3(c, d, e, a, b, 43);
+ R3(b, c, d, e, a, 44); R3(a, b, c, d, e, 45);
+ R3(e, a, b, c, d, 46); R3(d, e, a, b, c, 47);
+ R3(c, d, e, a, b, 48); R3(b, c, d, e, a, 49);
+ R3(a, b, c, d, e, 50); R3(e, a, b, c, d, 51);
+ R3(d, e, a, b, c, 52); R3(c, d, e, a, b, 53);
+ R3(b, c, d, e, a, 54); R3(a, b, c, d, e, 55);
+ R3(e, a, b, c, d, 56); R3(d, e, a, b, c, 57);
+ R3(c, d, e, a, b, 58); R3(b, c, d, e, a, 59);
+
+ R4(a, b, c, d, e, 60); R4(e, a, b, c, d, 61);
+ R4(d, e, a, b, c, 62); R4(c, d, e, a, b, 63);
+ R4(b, c, d, e, a, 64); R4(a, b, c, d, e, 65);
+ R4(e, a, b, c, d, 66); R4(d, e, a, b, c, 67);
+ R4(c, d, e, a, b, 68); R4(b, c, d, e, a, 69);
+ R4(a, b, c, d, e, 70); R4(e, a, b, c, d, 71);
+ R4(d, e, a, b, c, 72); R4(c, d, e, a, b, 73);
+ R4(b, c, d, e, a, 74); R4(a, b, c, d, e, 75);
+ R4(e, a, b, c, d, 76); R4(d, e, a, b, c, 77);
+ R4(c, d, e, a, b, 78); R4(b, c, d, e, a, 79);
+
+ /* Add the working vars back into context.state[] */
+ state[0] += a;
+ state[1] += b;
+ state[2] += c;
+ state[3] += d;
+ state[4] += e;
+ /* Wipe variables */
+ a = b = c = d = e = 0;
+ memset(block32, 0x00, sizeof(block32));
+}
+
+
+static void sha1_init(void *ctx)
+{
+ struct sha1_ctx *sctx = ctx;
+ static const struct sha1_ctx initstate = {
+ 0,
+ { 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0 },
+ { 0, }
+ };
+
+ *sctx = initstate;
+}
+
+
+static void sha1_update(void *ctx, const u8 *data, unsigned int len)
+{
+ struct sha1_ctx *sctx = ctx;
+ unsigned int i, j;
+
+ j = (sctx->count >> 3) & 0x3f;
+ sctx->count += len << 3;
+
+ if ((j + len) > 63) {
+ memcpy(&sctx->buffer[j], data, (i = 64 - j));
+ sha1_transform(sctx->state, sctx->buffer);
+ for ( ; i + 63 < len; i += 64)
+ sha1_transform(sctx->state, &data[i]);
+ j = 0;
+ } else
+ i = 0;
+ memcpy(&sctx->buffer[j], &data[i], len - i);
+}
+
+
+/* Add padding and return the message digest. */
+static void sha1_final(void *ctx, u8 *out)
+{
+ struct sha1_ctx *sctx = ctx;
+ u32 i, j, index, padlen;
+ u64 t;
+ u8 bits[8] = { 0, };
+ static const u8 padding[64] = { 0x80, };
+
+ t = sctx->count;
+ bits[7] = 0xff & t; t >>= 8;
+ bits[6] = 0xff & t; t >>= 8;
+ bits[5] = 0xff & t; t >>= 8;
+ bits[4] = 0xff & t; t >>= 8;
+ bits[3] = 0xff & t; t >>= 8;
+ bits[2] = 0xff & t; t >>= 8;
+ bits[1] = 0xff & t; t >>= 8;
+ bits[0] = 0xff & t;
+
+ /* Pad out to 56 mod 64 */
+ index = (sctx->count >> 3) & 0x3f;
+ padlen = (index < 56) ? (56 - index) : ((64+56) - index);
+ sha1_update(sctx, padding, padlen);
+
+ /* Append length */
+ sha1_update(sctx, bits, sizeof(bits));
+
+ /* Store state in digest */
+ for (i = j = 0; i < 5; i++, j += 4) {
+ u32 t2 = sctx->state[i];
+ out[j+3] = t2 & 0xff; t2 >>= 8;
+ out[j+2] = t2 & 0xff; t2 >>= 8;
+ out[j+1] = t2 & 0xff; t2 >>= 8;
+ out[j] = t2 & 0xff;
+ }
+
+ /* Wipe context */
+ memset(sctx, 0, sizeof(*sctx));
+}
+
+
+
+
+/*----------------------------------------------------------------------------
+ * Process identification
+ *----------------------------------------------------------------------------*/
+
+/* This function computes a SHA-1 hash of the current process's executable,
+ * used for authentication */
+int tf_get_current_process_hash(void *hash)
+{
+	int result = -ENOENT;	/* no executable VMA found yet */
+ void *buffer;
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+
+ buffer = internal_kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (buffer == NULL) {
+ dprintk(
+ KERN_ERR "tf_get_current_process_hash:"
+ " Out of memory for buffer!\n");
+ return -ENOMEM;
+ }
+
+ mm = current->mm;
+
+ down_read(&(mm->mmap_sem));
+ for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
+ if ((vma->vm_flags & VM_EXECUTABLE) != 0 && vma->vm_file
+ != NULL) {
+ struct dentry *dentry;
+ unsigned long start;
+ unsigned long cur;
+ unsigned long end;
+ struct sha1_ctx sha1;
+
+ dentry = dget(vma->vm_file->f_dentry);
+
+ dprintk(
+ KERN_DEBUG "tf_get_current_process_hash: "
+ "Found executable VMA for inode %lu "
+ "(%lu bytes).\n",
+ dentry->d_inode->i_ino,
+ (unsigned long) (dentry->d_inode->
+ i_size));
+
+ start = do_mmap(vma->vm_file, 0,
+ dentry->d_inode->i_size,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_PRIVATE, 0);
+			/* do_mmap returns a negative errno as unsigned long */
+			if (IS_ERR_VALUE(start)) {
+				dprintk(
+					KERN_ERR "tf_get_current_process_hash:"
+					" do_mmap failed (error %d)!\n",
+ (int) start);
+ dput(dentry);
+ result = -EFAULT;
+ goto vma_out;
+ }
+
+ end = start + dentry->d_inode->i_size;
+
+ sha1_init(&sha1);
+ cur = start;
+ while (cur < end) {
+ unsigned long chunk;
+
+ chunk = end - cur;
+ if (chunk > PAGE_SIZE)
+ chunk = PAGE_SIZE;
+ if (copy_from_user(buffer, (const void *) cur,
+ chunk) != 0) {
+ dprintk(
+ KERN_ERR "tf_get_current_"
+ "process_hash: copy_from_user "
+ "failed!\n");
+ result = -EINVAL;
+ (void) do_munmap(mm, start,
+ dentry->d_inode->i_size);
+ dput(dentry);
+ goto vma_out;
+ }
+ sha1_update(&sha1, buffer, chunk);
+ cur += chunk;
+ }
+ sha1_final(&sha1, hash);
+ result = 0;
+
+ (void) do_munmap(mm, start, dentry->d_inode->i_size);
+ dput(dentry);
+ break;
+ }
+ }
+vma_out:
+ up_read(&(mm->mmap_sem));
+
+ internal_kfree(buffer);
+
+ if (result == -ENOENT)
+ dprintk(
+ KERN_ERR "tf_get_current_process_hash: "
+ "No executable VMA found for process!\n");
+ return result;
+}
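+
+/*
+ * Illustrative caller (a sketch under the assumption that "hash" points
+ * to at least SHA1_DIGEST_SIZE bytes of writable memory):
+ *
+ *	u8 hash[SHA1_DIGEST_SIZE];
+ *	int err = tf_get_current_process_hash(hash);
+ *	if (err != 0)
+ *		return err;
+ *
+ * Possible errors are -ENOMEM, -EFAULT, -EINVAL and -ENOENT.
+ */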
+
+#ifndef CONFIG_ANDROID
+/* This function hashes the path of the current application into "buffer".
+ * If "data" is not NULL, "data_len" bytes of "data" are added to the hash
+ * as well.
+ */
+int tf_hash_application_path_and_data(char *buffer, void *data,
+ u32 data_len)
+{
+ int result = -ENOENT;
+	char *tmp = NULL;	/* scratch page for d_path() */
+ struct mm_struct *mm;
+ struct vm_area_struct *vma;
+
+	tmp = internal_kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (tmp == NULL) {
+ result = -ENOMEM;
+ goto end;
+ }
+
+ mm = current->mm;
+
+ down_read(&(mm->mmap_sem));
+ for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
+ if ((vma->vm_flags & VM_EXECUTABLE) != 0
+ && vma->vm_file != NULL) {
+ struct path *path;
+ char *endpath;
+ size_t pathlen;
+ struct sha1_ctx sha1;
+ u8 hash[SHA1_DIGEST_SIZE];
+
+ path = &vma->vm_file->f_path;
+
+			endpath = d_path(path, tmp, PAGE_SIZE);
+			if (IS_ERR(endpath)) {
+				result = PTR_ERR(endpath);
+				up_read(&(mm->mmap_sem));
+				goto end;
+			}
+			pathlen = (tmp + PAGE_SIZE) - endpath;
+
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+ {
+ char *c;
+ dprintk(KERN_DEBUG "current process path = ");
+ for (c = endpath;
+					c < tmp + PAGE_SIZE;
+ c++)
+ dprintk("%c", *c);
+
+ dprintk(", uid=%d, euid=%d\n", current_uid(),
+ current_euid());
+ }
+#endif /* defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */
+
+ sha1_init(&sha1);
+ sha1_update(&sha1, endpath, pathlen);
+ if (data != NULL) {
+ dprintk(KERN_INFO "current process path: "
+ "Hashing additional data\n");
+ sha1_update(&sha1, data, data_len);
+ }
+ sha1_final(&sha1, hash);
+ memcpy(buffer, hash, sizeof(hash));
+
+ result = 0;
+
+ break;
+ }
+ }
+ up_read(&(mm->mmap_sem));
+
+end:
+	if (tmp != NULL)
+		internal_kfree(tmp);
+
+ return result;
+}
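+
+/*
+ * Illustrative caller (sketch): "out" must hold at least SHA1_DIGEST_SIZE
+ * bytes; passing data == NULL hashes the executable path alone.
+ *
+ *	u8 out[SHA1_DIGEST_SIZE];
+ *	int err = tf_hash_application_path_and_data((char *) out, NULL, 0);
+ */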
+#endif /* !CONFIG_ANDROID */
+
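+/*
+ * Statistic-counting wrappers around the standard kernel allocators: each
+ * successful allocation increments a counter in the device statistics and
+ * each release decrements it, so unbalanced allocations show up in the
+ * stats.
+ */
+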
+void *internal_kmalloc(size_t size, int priority)
+{
+ void *ptr;
+ struct tf_device *dev = tf_get_device();
+
+ ptr = kmalloc(size, priority);
+
+ if (ptr != NULL)
+ atomic_inc(
+ &dev->stats.stat_memories_allocated);
+
+ return ptr;
+}
+
+void internal_kfree(void *ptr)
+{
+ struct tf_device *dev = tf_get_device();
+
+ if (ptr != NULL)
+ atomic_dec(
+ &dev->stats.stat_memories_allocated);
+	kfree(ptr);
+}
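+
+/*
+ * Illustrative pairing (a sketch): every internal_kmalloc() is expected to
+ * be matched by an internal_kfree() so that stat_memories_allocated
+ * returns to its previous value.
+ *
+ *	void *p = internal_kmalloc(128, GFP_KERNEL);
+ *	if (p == NULL)
+ *		return -ENOMEM;
+ *	...
+ *	internal_kfree(p);
+ */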
+
+void internal_vunmap(void *ptr)
+{
+ struct tf_device *dev = tf_get_device();
+
+ if (ptr != NULL)
+ atomic_dec(
+ &dev->stats.stat_memories_allocated);
+
+	vunmap((void *) ((unsigned long) ptr & PAGE_MASK));
+}
+
+void *internal_vmalloc(size_t size)
+{
+ void *ptr;
+ struct tf_device *dev = tf_get_device();
+
+ ptr = vmalloc(size);
+
+ if (ptr != NULL)
+ atomic_inc(
+ &dev->stats.stat_memories_allocated);
+
+ return ptr;
+}
+
+void internal_vfree(void *ptr)
+{
+ struct tf_device *dev = tf_get_device();
+
+ if (ptr != NULL)
+ atomic_dec(
+ &dev->stats.stat_memories_allocated);
+	vfree(ptr);
+}
+
+unsigned long internal_get_zeroed_page(int priority)
+{
+ unsigned long result;
+ struct tf_device *dev = tf_get_device();
+
+ result = get_zeroed_page(priority);
+
+ if (result != 0)
+ atomic_inc(&dev->stats.
+ stat_pages_allocated);
+
+ return result;
+}
+
+void internal_free_page(unsigned long addr)
+{
+ struct tf_device *dev = tf_get_device();
+
+ if (addr != 0)
+ atomic_dec(
+ &dev->stats.stat_pages_allocated);
+	free_page(addr);
+}
+
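+/*
+ * Wrapper around get_user_pages(): on success the number of pages actually
+ * pinned is added to stat_pages_locked; each pinned page should later be
+ * released through internal_page_cache_release() to keep the counter
+ * balanced.
+ */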
+int internal_get_user_pages(
+ struct task_struct *tsk,
+ struct mm_struct *mm,
+ unsigned long start,
+ int len,
+ int write,
+ int force,
+ struct page **pages,
+ struct vm_area_struct **vmas)
+{
+ int result;
+ struct tf_device *dev = tf_get_device();
+
+ result = get_user_pages(
+ tsk,
+ mm,
+ start,
+ len,
+ write,
+ force,
+ pages,
+ vmas);
+
+ if (result > 0)
+ atomic_add(result,
+ &dev->stats.stat_pages_locked);
+
+ return result;
+}
+
+void internal_get_page(struct page *page)
+{
+ struct tf_device *dev = tf_get_device();
+
+ atomic_inc(&dev->stats.stat_pages_locked);
+
+ get_page(page);
+}
+
+void internal_page_cache_release(struct page *page)
+{
+ struct tf_device *dev = tf_get_device();
+
+ atomic_dec(&dev->stats.stat_pages_locked);
+
+ page_cache_release(page);
+}
diff --git a/security/smc/tf_util.h b/security/smc/tf_util.h
new file mode 100644
index 0000000..43a05da
--- /dev/null
+++ b/security/smc/tf_util.h
@@ -0,0 +1,103 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TF_UTIL_H__
+#define __TF_UTIL_H__
+
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/crypto.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+#include <asm/byteorder.h>
+
+#include "tf_protocol.h"
+#include "tf_defs.h"
+
+/*----------------------------------------------------------------------------
+ * Debug printing routines
+ *----------------------------------------------------------------------------*/
+
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+
+void address_cache_property(unsigned long va);
+
+#define dprintk printk
+
+void tf_dump_l1_shared_buffer(struct tf_l1_shared_buffer *buffer);
+
+void tf_dump_command(union tf_command *command);
+
+void tf_dump_answer(union tf_answer *answer);
+
+#ifdef CONFIG_BENCH_SECURE_CYCLE
+void setup_counters(void);
+void run_bogo_mips(void);
+int run_code_speed(unsigned int loop);
+int run_data_speed(unsigned int loop, unsigned long va);
+#endif /* CONFIG_BENCH_SECURE_CYCLE */
+
+#else /* defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */
+
+#define dprintk(args...) do { ; } while (0)
+#define tf_dump_l1_shared_buffer(buffer) ((void) 0)
+#define tf_dump_command(command) ((void) 0)
+#define tf_dump_answer(answer) ((void) 0)
+
+#endif /* defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */
+
+#define SHA1_DIGEST_SIZE 20
+
+/*----------------------------------------------------------------------------
+ * Process identification
+ *----------------------------------------------------------------------------*/
+
+int tf_get_current_process_hash(void *hash);
+
+#ifndef CONFIG_ANDROID
+int tf_hash_application_path_and_data(char *buffer, void *data, u32 data_len);
+#endif /* !CONFIG_ANDROID */
+
+/*----------------------------------------------------------------------------
+ * Statistic computation
+ *----------------------------------------------------------------------------*/
+
+void *internal_kmalloc(size_t size, int priority);
+void internal_kfree(void *ptr);
+void internal_vunmap(void *ptr);
+void *internal_vmalloc(size_t size);
+void internal_vfree(void *ptr);
+unsigned long internal_get_zeroed_page(int priority);
+void internal_free_page(unsigned long addr);
+int internal_get_user_pages(
+ struct task_struct *tsk,
+ struct mm_struct *mm,
+ unsigned long start,
+ int len,
+ int write,
+ int force,
+ struct page **pages,
+ struct vm_area_struct **vmas);
+void internal_get_page(struct page *page);
+void internal_page_cache_release(struct page *page);
+#endif /* __TF_UTIL_H__ */
diff --git a/security/smc/tf_zebra.h b/security/smc/tf_zebra.h
new file mode 100644
index 0000000..b30fe6f
--- /dev/null
+++ b/security/smc/tf_zebra.h
@@ -0,0 +1,44 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __TF_ZEBRA_H__
+#define __TF_ZEBRA_H__
+
+#include "tf_defs.h"
+
+int tf_ctrl_device_register(void);
+
+int tf_start(struct tf_comm *comm,
+ u32 workspace_addr, u32 workspace_size,
+ u8 *pa_buffer, u32 pa_size,
+ u8 *properties_buffer, u32 properties_length);
+
+/* Assembler entry points to/from secure */
+u32 schedule_secure_world(u32 app_id, u32 proc_id, u32 flags, u32 args);
+u32 rpc_handler(u32 p1, u32 p2, u32 p3, u32 p4);
+u32 read_mpidr(void);
+
+/* L4 SEC clockdomain enabling/disabling */
+void tf_l4sec_clkdm_wakeup(bool wakelock);
+void tf_l4sec_clkdm_allow_idle(bool wakeunlock);
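+
+/*
+ * Illustrative bracket (a sketch, assuming the usual pairing): hold the
+ * L4 SEC clock domain awake across a secure-world call, then allow it to
+ * idle again once the call returns.
+ *
+ *	tf_l4sec_clkdm_wakeup(false);
+ *	status = schedule_secure_world(app_id, proc_id, flags, args);
+ *	tf_l4sec_clkdm_allow_idle(false);
+ */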
+
+/* Delayed secure resume */
+int tf_delayed_secure_resume(void);
+
+#endif /* __TF_ZEBRA_H__ */