about summary refs log tree commit diff stats
path: root/security/smc
diff options
context:
space:
mode:
author: Trusted Logic <smc_support@trusted-logic.com> 2011-11-09 17:52:01 +0100
committer: Leed Aguilar <leed.aguilar@ti.com> 2011-12-09 16:01:53 -0500
commit 335de32e6b4666d383d904b00612231c9f8ff2a7 (patch)
tree 893ca181801a78b80ec84270efe9e9d90edd6bfa /security/smc
parent 042a2c41c2445220892ac0562de286dd955ebed9 (diff)
downloadkernel_samsung_espresso10-335de32e6b4666d383d904b00612231c9f8ff2a7.zip
kernel_samsung_espresso10-335de32e6b4666d383d904b00612231c9f8ff2a7.tar.gz
kernel_samsung_espresso10-335de32e6b4666d383d904b00612231c9f8ff2a7.tar.bz2
OMAP: SMC: Update to AG01.06 release
This update should only be used with PPA 1.6.4 and higher. There are many important, coupled fixes in PPA 1.6.3+, including ROM code critical section management. * GP devices should still be built without SMC * The SMC driver now supports starting the SMC daemon within a member group system; by default this is set to 1026 (DRMRPC) * FIPS certification capable (requires more kernel patches, though) * fixed ext4 filesystem corruptions on HW-encrypted partitions * fixed wrong handle returned by C_OpenSession (crypto API) when opening a secondary session from a service * fixed SHandleClose silently failing on cryptoki key handles * fixed an issue where two instances of the tf_daemon could connect to the SMC PA * fixed a potential memory leak in case of error when the tf_daemon is connecting to the secure world Change-Id: I1b792c88fb4615fae60643d49989ac9fa542bf9b Signed-off-by: Trusted Logic <smc_support@trusted-logic.com> Signed-off-by: Jeremie Corbier <jeremie.corbier@trusted-logic.com> Signed-off-by: Bryan Buckley <bryan.buckley@ti.com>
Diffstat (limited to 'security/smc')
-rw-r--r--security/smc/Kconfig25
-rw-r--r--security/smc/Makefile8
-rw-r--r--security/smc/bridge_pub2sec.S136
-rw-r--r--security/smc/s_version.h10
-rw-r--r--security/smc/tee_client_api_ex.h1
-rw-r--r--security/smc/tf_comm.c167
-rw-r--r--security/smc/tf_comm.h4
-rw-r--r--security/smc/tf_comm_mshield.c632
-rw-r--r--security/smc/tf_conn.c146
-rw-r--r--security/smc/tf_conn.h18
-rw-r--r--security/smc/tf_crypto.c133
-rw-r--r--security/smc/tf_crypto.h55
-rw-r--r--security/smc/tf_crypto_aes.c434
-rw-r--r--security/smc/tf_crypto_digest.c135
-rw-r--r--security/smc/tf_crypto_hmac.c296
-rw-r--r--security/smc/tf_defs.h22
-rw-r--r--security/smc/tf_device.c233
-rw-r--r--security/smc/tf_device_mshield.c225
-rw-r--r--security/smc/tf_dma.c2
-rw-r--r--security/smc/tf_protocol.h23
-rw-r--r--security/smc/tf_self_test_device.c508
-rw-r--r--security/smc/tf_self_test_io.h54
-rw-r--r--security/smc/tf_self_test_post.c707
-rw-r--r--security/smc/tf_teec.c146
-rw-r--r--security/smc/tf_util.c118
-rw-r--r--security/smc/tf_util.h38
-rw-r--r--security/smc/tf_zebra.h17
27 files changed, 2899 insertions, 1394 deletions
diff --git a/security/smc/Kconfig b/security/smc/Kconfig
index 315912a..efab4ee 100644
--- a/security/smc/Kconfig
+++ b/security/smc/Kconfig
@@ -2,10 +2,11 @@ config TF_ZEBRA
bool
config SECURITY_MIDDLEWARE_COMPONENT
- bool "Enable SMC Driver"
+ tristate "Enable SMC Driver"
depends on ARCH_OMAP3 || ARCH_OMAP4
default n
select TF_ZEBRA
+ select CRYPTO_SHA1 if ARCH_OMAP4
help
This option adds kernel support for communication with the SMC
Protected Application.
@@ -37,9 +38,23 @@ config TF_DRIVER_DEBUG_SUPPORT
help
This options enables debug traces in the driver.
-config SMC_BENCH_SECURE_CYCLE
- bool "Enable secure cycles benchmarks"
- depends on TF_DRIVER_DEBUG_SUPPORT && ARCH_OMAP4
+config TF_DRIVER_CRYPTO_FIPS
+ bool "FIPS compliance support"
+ depends on SMC_KERNEL_CRYPTO && ARCH_OMAP4 && MODULE_EXTRA_COPY
default n
help
- This options enables benchmarks.
+ This option enables features for FIPS certification of the SMC
+ cryptographic driver: built-in test vectors for POST, and a
+ device for user space programs to submit more test vectors.
+
+ If you are unsure how to answer this question, answer Y.
+
+config TF_DRIVER_FAULT_INJECTION
+ bool "Fault injection support"
+ depends on TF_DRIVER_CRYPTO_FIPS && ARCH_OMAP4
+ default n
+ help
+ This option allows forcing cryptographic operations in the SMC
+ driver to fail.
+
+ If you are unsure how to answer this question, answer Y.
diff --git a/security/smc/Makefile b/security/smc/Makefile
index 2133178..ed2de4d 100644
--- a/security/smc/Makefile
+++ b/security/smc/Makefile
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2006-2010 Trusted Logic S.A.
+# Copyright (c) 2011 Trusted Logic S.A.
# All Rights Reserved.
#
# This program is free software; you can redistribute it and/or
@@ -41,5 +41,11 @@ tf_driver-objs += tf_device_mshield.o
tf_driver-objs += bridge_pub2sec.o
tf_driver-objs += tf_teec.o
+ifeq ($(CONFIG_TF_DRIVER_CRYPTO_FIPS),y)
+tf_driver-objs += tf_crypto_hmac.o
+tf_driver-objs += tf_self_test_device.o
+tf_driver-objs += tf_self_test_post.o
+endif
+
obj-$(CONFIG_SECURITY_MIDDLEWARE_COMPONENT) += tf_driver.o
obj-$(CONFIG_SECURITY_MIDDLEWARE_COMPONENT) += rproc_drm.o
diff --git a/security/smc/bridge_pub2sec.S b/security/smc/bridge_pub2sec.S
index 15cd3b7..05b4c99 100644
--- a/security/smc/bridge_pub2sec.S
+++ b/security/smc/bridge_pub2sec.S
@@ -63,9 +63,13 @@ schedule_secure_world:
b label_smc
return_from_rpc:
- ldr r9, =g_RPC_parameters
- ldm r9, {r0-r3}
- /* fall through */
+ /* Always return success to an RPC */
+ /* The effective status is stored in the L1 shared buffer */
+ mov r0, #0x00000000
+ mov r1, #0x00000000
+ mov r2, #0x00000000
+ mov r3, #0x00000000
+ /* fall through... */
return_from_irq:
ldr r10, =g_secure_task_id
@@ -78,38 +82,10 @@ label_smc:
dsb
dmb
-#ifdef CONFIG_BENCH_SECURE_CYCLE
- /* Come from Non Secure: activate counter 1 (write to 0 are ignored) */
- mov r4, #0x00000002
-
- /* Read Count Enable Set Register */
- mcr p15, 0x0, r4, c9, c12, 1
-
- /* Come from Non Secure: stop counter 0 (write to 0 are ignored) */
- mov r4, #0x00000001
-
- /* Write Count Enable Clear Register */
- mcr p15, 0x0, r4, c9, c12, 2
-#endif
-
smc #0
b service_end
nop
-#ifdef CONFIG_BENCH_SECURE_CYCLE
- /* Come from Secure: activate counter 0 (write to 0 are ignored) */
- mov r4, #0x00000001
-
- /* Write Count Enable Set Register */
- mcr p15, 0x0, r4, c9, c12, 1
-
- /* Come from Secure: stop counter 1 (write to 0 are ignored) */
- mov r4, #0x00000002
-
- /* Write Count Enable Clear Register */
- mcr p15, 0x0, r4, c9, c12, 2
-#endif
-
INVALIDATE_BTB
ldr r8, =g_secure_task_id
str r6, [r8]
@@ -118,25 +94,7 @@ label_smc:
ldr r8, =g_service_end
str r0, [r8]
- b schedule_secure_world_exit
-
service_end:
-
-schedule_secure_world_exit:
-#ifdef CONFIG_BENCH_SECURE_CYCLE
- /* Come from Secure: activate counter 0 (write to 0 are ignored) */
- mov r4, #0x00000001
-
- /* Write Count Enable Set Register */
- mcr p15, 0x0, r4, c9, c12, 1
-
- /* Come from Secure: stop counter 1 (write to 0 are ignored) */
- mov r4, #0x00000002
-
- /* Write Count Enable Clear Register */
- mcr p15, 0x0, r4, c9, c12, 2
-#endif
-
INVALIDATE_BTB
/* Restore registers */
@@ -144,20 +102,6 @@ schedule_secure_world_exit:
rpc_handler:
.global rpc_handler
-
-#ifdef CONFIG_BENCH_SECURE_CYCLE
- /* Come from Secure: activate counter 0 (write to 0 are ignored) */
- mov r4, #0x00000001
-
- /* Write Count Enable Set Register */
- mcr p15, 0x0, r4, c9, c12, 1
-
- /* Come from Secure: stop counter 1 (write to 0 are ignored) */
- mov r4, #0x00000002
-
- /* Write Count Enable Clear Register */
- mcr p15, 0x0, r4, c9, c12, 2
-#endif
INVALIDATE_BTB
/* g_RPC_advancement = RPC_ADVANCEMENT_PENDING */
@@ -165,8 +109,8 @@ rpc_handler:
mov r9, #RPC_ADVANCEMENT_PENDING
str r9, [r8]
- ldr r8, =g_RPC_parameters
- stm r8, {r0-r3}
+ /* The effective command is stored in the L1 shared buffer */
+ mov r0, #0x00000000
ldr r8, =g_secure_task_id
str r6, [r8]
@@ -178,65 +122,3 @@ rpc_handler:
/* Restore registers */
pop {r4-r12, pc}
-#ifdef CONFIG_BENCH_SECURE_CYCLE
-
-setup_counters:
- .global setup_counters
-
- push {r14}
-
- mrc p15, 0, r2, c9, c12, 0
- orr r2, r2, #0x3
- mcr p15, 0, r2, c9, c12, 0
-
- mrc p15, 0, r2, c9, c12, 1
- orr r2, r2, #0x80000000
- mcr p15, 0, r2, c9, c12, 1
-
- pop {pc}
-
-run_code_speed:
- .global run_code_speed
-
- push {r14}
-
- /* Reset cycle counter */
- mov r2, #0
- mcr p15, 0, r2, c9, c13, 0
-
-run_code_speed_loop:
- sub r0, r0, #1
- cmp r0, #0
- bne run_code_speed_loop
-
- /* Read cycle counter */
- mrc p15, 0, r0, c9, c13, 0
-
- pop {pc}
-
-run_data_speed:
- .global run_data_speed
-
- push {r14}
-
- /* Reset cycle counter */
- mov r2, #0
- mcr p15, 0, r2, c9, c13, 0
-
-run_data_speed_loop:
- sub r0, r0, #1
- ldr r2, [r1]
- cmp r0, #0
- bne run_data_speed_loop
-
- /* read cycle counter */
- mrc p15, 0, r0, c9, c13, 0
-
- pop {pc}
-
-#endif
-
-read_mpidr:
- .global read_mpidr
- mrc p15, 0, r0, c0, c0, 5
- bx lr
diff --git a/security/smc/s_version.h b/security/smc/s_version.h
index a16d548..38a3f1e 100644
--- a/security/smc/s_version.h
+++ b/security/smc/s_version.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010 Trusted Logic S.A.
+ * Copyright (c) 2011 Trusted Logic S.A.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -38,14 +38,14 @@
/*
* This version number must be updated for each new release
*/
-#define S_VERSION_MAIN "01.04"
+#define S_VERSION_MAIN "01.06"
/*
* If this is a patch or engineering version use the following
* defines to set the version number. Else set these values to 0.
*/
-#define S_VERSION_PATCH 6
-#define S_VERSION_ENG 0
+#define S_VERSION_ENG 1
+#define S_VERSION_PATCH 0
#ifdef S_VERSION_BUILD
/* TRICK: detect if S_VERSION is defined but empty */
@@ -84,8 +84,8 @@
S_VERSION_OS \
S_VERSION_PLATFORM \
S_VERSION_MAIN \
- _S_VERSION_PATCH \
_S_VERSION_ENG \
+ _S_VERSION_PATCH \
"." __STRINGIFY2(S_VERSION_BUILD) " " \
S_VERSION_VARIANT
diff --git a/security/smc/tee_client_api_ex.h b/security/smc/tee_client_api_ex.h
index 4988904..da938d5 100644
--- a/security/smc/tee_client_api_ex.h
+++ b/security/smc/tee_client_api_ex.h
@@ -29,6 +29,7 @@
/* Implementation-defined login types */
#define TEEC_LOGIN_AUTHENTICATION 0x80000000
#define TEEC_LOGIN_PRIVILEGED 0x80000002
+#define TEEC_LOGIN_PRIVILEGED_KERNEL 0x80000002
/* Type definitions */
diff --git a/security/smc/tf_comm.c b/security/smc/tf_comm.c
index 451e63e..af4cef6 100644
--- a/security/smc/tf_comm.c
+++ b/security/smc/tf_comm.c
@@ -102,16 +102,16 @@ void tf_set_current_time(struct tf_comm *comm)
/* read sync_serial_n and change the TimeSlot bit field */
new_sync_serial =
- tf_read_reg32(&comm->pBuffer->sync_serial_n) + 1;
+ tf_read_reg32(&comm->l1_buffer->sync_serial_n) + 1;
do_gettimeofday(&now);
time64 = now.tv_sec;
time64 = (time64 * 1000) + (now.tv_usec / 1000);
/* Write the new time64 and nSyncSerial into shared memory */
- tf_write_reg64(&comm->pBuffer->time_n[new_sync_serial &
+ tf_write_reg64(&comm->l1_buffer->time_n[new_sync_serial &
TF_SYNC_SERIAL_TIMESLOT_N], time64);
- tf_write_reg32(&comm->pBuffer->sync_serial_n,
+ tf_write_reg32(&comm->l1_buffer->sync_serial_n,
new_sync_serial);
spin_unlock(&comm->lock);
@@ -134,12 +134,12 @@ static inline void tf_read_timeout(struct tf_comm *comm, u64 *time)
while (sync_serial_s_initial != sync_serial_s_final) {
sync_serial_s_initial = tf_read_reg32(
- &comm->pBuffer->sync_serial_s);
+ &comm->l1_buffer->sync_serial_s);
time64 = tf_read_reg64(
- &comm->pBuffer->timeout_s[sync_serial_s_initial&1]);
+ &comm->l1_buffer->timeout_s[sync_serial_s_initial&1]);
sync_serial_s_final = tf_read_reg32(
- &comm->pBuffer->sync_serial_s);
+ &comm->l1_buffer->sync_serial_s);
}
spin_unlock(&comm->lock);
@@ -456,7 +456,11 @@ u32 tf_get_l2_descriptor_common(u32 vaddr, struct mm_struct *mm)
* Linux's pte doesn't keep track of TEX value.
* Have to jump to hwpte see include/asm/pgtable.h
*/
- hwpte = (u32 *) (((u32) ptep) - 0x800);
+#ifdef PTE_HWTABLE_SIZE
+ hwpte = (u32 *) (ptep + PTE_HWTABLE_PTRS);
+#else
+ hwpte = (u32 *) (ptep - PTRS_PER_PTE);
+#endif
if (((*hwpte) & L2_DESCRIPTOR_ADDR_MASK) !=
((*ptep) & L2_DESCRIPTOR_ADDR_MASK))
goto error;
@@ -622,9 +626,9 @@ void tf_cleanup_shared_memory(
}
/*
- * Make sure the coarse pages are allocated. If not allocated, do it Locks down
- * the physical memory pages
- * Verifies the memory attributes depending on flags
+ * Make sure the coarse pages are allocated. If not allocated, do it.
+ * Locks down the physical memory pages.
+ * Verifies the memory attributes depending on flags.
*/
int tf_fill_descriptor_table(
struct tf_coarse_page_table_allocation_context *alloc_context,
@@ -642,7 +646,7 @@ int tf_fill_descriptor_table(
u32 coarse_page_count;
u32 page_count;
u32 page_shift = 0;
- int error;
+ int ret = 0;
unsigned int info = read_cpuid(CPUID_CACHETYPE);
dprintk(KERN_INFO "tf_fill_descriptor_table"
@@ -680,7 +684,7 @@ int tf_fill_descriptor_table(
dprintk(KERN_ERR "tf_fill_descriptor_table(%p): "
"%u pages required to map shared memory!\n",
shmem_desc, page_count);
- error = -ENOMEM;
+ ret = -ENOMEM;
goto error;
}
@@ -729,11 +733,11 @@ int tf_fill_descriptor_table(
if (coarse_pg_table == NULL) {
dprintk(KERN_ERR
- "tf_fill_descriptor_table(%p):"
- " SCXLNXConnAllocateCoarsePageTable "
+ "tf_fill_descriptor_table(%p): "
+ "tf_alloc_coarse_page_table "
"failed for coarse page %d\n",
shmem_desc, coarse_page_index);
- error = -ENOMEM;
+ ret = -ENOMEM;
goto error;
}
@@ -778,11 +782,11 @@ int tf_fill_descriptor_table(
if ((pages <= 0) ||
(pages != (pages_to_get - page_shift))) {
- dprintk(KERN_ERR"tf_fill_descriptor_table:"
+ dprintk(KERN_ERR "tf_fill_descriptor_table:"
" get_user_pages got %d pages while "
"trying to get %d pages!\n",
pages, pages_to_get - page_shift);
- error = -EFAULT;
+ ret = -EFAULT;
goto error;
}
@@ -813,34 +817,29 @@ int tf_fill_descriptor_table(
"memory. Rejecting!\n",
coarse_pg_table->
descriptors[j]);
- error = -EFAULT;
+ ret = -EFAULT;
goto error;
}
}
- } else {
- /* Kernel-space memory */
- for (j = page_shift;
- j < pages_to_get;
- j++) {
+ } else if (is_vmalloc_addr((void *)buffer_offset_vaddr)) {
+ /* Kernel-space memory obtained through vmalloc */
+ dprintk(KERN_INFO
+ "tf_fill_descriptor_table: "
+ "vmalloc'ed buffer starting at %p\n",
+ (void *)buffer_offset_vaddr);
+ for (j = page_shift; j < pages_to_get; j++) {
struct page *page;
void *addr =
(void *)(buffer_offset_vaddr +
(j - page_shift) * PAGE_SIZE);
- if (!is_vmalloc_addr(addr)) {
- dprintk(KERN_ERR
- "tf_fill_descriptor_table: "
- "cannot handle address %p\n",
- addr);
- error = -EFAULT;
- goto error;
- }
page = vmalloc_to_page(addr);
if (page == NULL) {
dprintk(KERN_ERR
"tf_fill_descriptor_table: "
- "cannot map %p to page\n",
+ "cannot map %p (vmalloc) "
+ "to page\n",
addr);
- error = -ENOMEM;
+ ret = -EFAULT;
goto error;
}
coarse_pg_table->descriptors[j] = (u32)page;
@@ -853,6 +852,17 @@ int tf_fill_descriptor_table(
&init_mm,
vmas[j]);
}
+ } else {
+ /* Other kernel-space memory */
+ dprintk(KERN_INFO
+ "tf_fill_descriptor_table: "
+ "buffer starting at virtual address %p\n",
+ (void *)buffer_offset_vaddr);
+ dprintk(KERN_WARNING
+ "tf_fill_descriptor_table: "
+ "address type not supported\n");
+ ret = -ENOSYS;
+ goto error;
}
dmac_flush_range((void *)coarse_pg_table->descriptors,
@@ -917,7 +927,7 @@ error:
shmem_desc,
0);
- return error;
+ return ret;
}
@@ -928,7 +938,7 @@ error:
u8 *tf_get_description(struct tf_comm *comm)
{
if (test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags)))
- return comm->pBuffer->version_description;
+ return comm->l1_buffer->version_description;
return NULL;
}
@@ -1000,9 +1010,9 @@ static void tf_copy_answers(struct tf_comm *comm)
if (test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags))) {
spin_lock(&comm->lock);
first_free_answer = tf_read_reg32(
- &comm->pBuffer->first_free_answer);
+ &comm->l1_buffer->first_free_answer);
first_answer = tf_read_reg32(
- &comm->pBuffer->first_answer);
+ &comm->l1_buffer->first_answer);
while (first_answer != first_free_answer) {
/* answer queue not empty */
@@ -1026,7 +1036,7 @@ static void tf_copy_answers(struct tf_comm *comm)
for (i = 0;
i < sizeof(struct tf_answer_header)/sizeof(u32);
i++)
- temp[i] = comm->pBuffer->answer_queue[
+ temp[i] = comm->l1_buffer->answer_queue[
(first_answer + i) %
TF_S_ANSWER_QUEUE_CAPACITY];
@@ -1035,7 +1045,7 @@ static void tf_copy_answers(struct tf_comm *comm)
sizeof(struct tf_answer_header)/sizeof(u32);
temp = (uint32_t *) &sComAnswer;
for (i = 0; i < command_size; i++)
- temp[i] = comm->pBuffer->answer_queue[
+ temp[i] = comm->l1_buffer->answer_queue[
(first_answer + i) %
TF_S_ANSWER_QUEUE_CAPACITY];
@@ -1049,7 +1059,7 @@ static void tf_copy_answers(struct tf_comm *comm)
answerStructureTemp->answer_copied = true;
first_answer += command_size;
- tf_write_reg32(&comm->pBuffer->first_answer,
+ tf_write_reg32(&comm->l1_buffer->first_answer,
first_answer);
}
spin_unlock(&(comm->lock));
@@ -1078,9 +1088,9 @@ static void tf_copy_command(
spin_lock(&comm->lock);
first_command = tf_read_reg32(
- &comm->pBuffer->first_command);
+ &comm->l1_buffer->first_command);
first_free_command = tf_read_reg32(
- &comm->pBuffer->first_free_command);
+ &comm->l1_buffer->first_free_command);
queue_words_count = first_free_command - first_command;
command_size = command->header.message_size +
@@ -1164,7 +1174,7 @@ copy:
tf_dump_command(command);
for (i = 0; i < command_size; i++)
- comm->pBuffer->command_queue[
+ comm->l1_buffer->command_queue[
(first_free_command + i) %
TF_N_MESSAGE_QUEUE_CAPACITY] =
((uint32_t *) command)[i];
@@ -1175,7 +1185,7 @@ copy:
tf_write_reg32(
&comm->
- pBuffer->first_free_command,
+ l1_buffer->first_free_command,
first_free_command);
}
spin_unlock(&comm->lock);
@@ -1196,9 +1206,6 @@ static int tf_send_recv(struct tf_comm *comm,
struct tf_answer_struct *answerStruct,
struct tf_connection *connection,
int bKillable
- #ifdef CONFIG_TF_ZEBRA
- , bool *secure_is_idle
- #endif
)
{
int result;
@@ -1234,17 +1241,6 @@ copy_answers:
#ifdef CONFIG_FREEZER
if (unlikely(freezing(current))) {
-#ifdef CONFIG_TF_ZEBRA
- if (!(*secure_is_idle)) {
- if (tf_schedule_secure_world(comm, true) ==
- STATUS_PENDING)
- goto copy_answers;
-
- tf_l4sec_clkdm_allow_idle(true);
- *secure_is_idle = true;
- }
-#endif
-
dprintk(KERN_INFO
"Entering refrigerator.\n");
refrigerator();
@@ -1327,9 +1323,9 @@ copy_answers:
u32 first_command;
spin_lock(&comm->lock);
first_command = tf_read_reg32(
- &comm->pBuffer->first_command);
+ &comm->l1_buffer->first_command);
first_free_command = tf_read_reg32(
- &comm->pBuffer->first_free_command);
+ &comm->l1_buffer->first_free_command);
spin_unlock(&comm->lock);
tf_read_timeout(comm, &timeout);
if ((first_free_command == first_command) &&
@@ -1350,13 +1346,9 @@ copy_answers:
*/
#ifdef CONFIG_TF_ZEBRA
schedule_secure_world:
- if (*secure_is_idle) {
- tf_l4sec_clkdm_wakeup(true);
- *secure_is_idle = false;
- }
#endif
- result = tf_schedule_secure_world(comm, false);
+ result = tf_schedule_secure_world(comm);
if (result < 0)
goto exit;
goto copy_answers;
@@ -1383,18 +1375,6 @@ wait:
"prepare to sleep 0x%lx jiffies\n",
nRelativeTimeoutJiffies);
-#ifdef CONFIG_TF_ZEBRA
- if (!(*secure_is_idle)) {
- if (tf_schedule_secure_world(comm, true) == STATUS_PENDING) {
- finish_wait(&comm->wait_queue, &wait);
- wait_prepared = false;
- goto copy_answers;
- }
- tf_l4sec_clkdm_allow_idle(true);
- *secure_is_idle = true;
- }
-#endif
-
/* go to sleep */
if (schedule_timeout(nRelativeTimeoutJiffies) == 0)
dprintk(KERN_INFO
@@ -1413,16 +1393,6 @@ exit:
wait_prepared = false;
}
-#ifdef CONFIG_TF_ZEBRA
- if ((!(*secure_is_idle)) && (result != -EIO)) {
- if (tf_schedule_secure_world(comm, true) == STATUS_PENDING)
- goto copy_answers;
-
- tf_l4sec_clkdm_allow_idle(true);
- *secure_is_idle = true;
- }
-#endif
-
#ifdef CONFIG_FREEZER
current->flags &= ~(PF_FREEZER_NOSIG);
current->flags |= (saved_flags & PF_FREEZER_NOSIG);
@@ -1453,9 +1423,6 @@ int tf_send_receive(struct tf_comm *comm,
cpumask_t saved_cpu_mask;
cpumask_t local_cpu_mask = CPU_MASK_NONE;
#endif
-#ifdef CONFIG_TF_ZEBRA
- bool secure_is_idle = true;
-#endif
answerStructure.answer = answer;
answerStructure.answer_copied = false;
@@ -1493,11 +1460,7 @@ int tf_send_receive(struct tf_comm *comm,
* Send the command
*/
error = tf_send_recv(comm,
- command, &answerStructure, connection, bKillable
- #ifdef CONFIG_TF_ZEBRA
- , &secure_is_idle
- #endif
- );
+ command, &answerStructure, connection, bKillable);
if (!bKillable && sigkill_pending()) {
if ((command->header.message_type ==
@@ -1578,11 +1541,7 @@ int tf_send_receive(struct tf_comm *comm,
connection->device_context;
error = tf_send_recv(comm,
- command, &answerStructure, connection, false
- #ifdef CONFIG_TF_ZEBRA
- , &secure_is_idle
- #endif
- );
+ command, &answerStructure, connection, false);
if (error == -EINTR) {
/*
* Another thread already sent
@@ -1624,11 +1583,7 @@ int tf_send_receive(struct tf_comm *comm,
destroy_context:
error = tf_send_recv(comm,
- command, &answerStructure, connection, false
- #ifdef CONFIG_TF_ZEBRA
- , &secure_is_idle
- #endif
- );
+ command, &answerStructure, connection, false);
/*
* tf_send_recv cannot return an error because
@@ -1683,7 +1638,7 @@ int tf_power_management(struct tf_comm *comm,
}
#endif
- status = ((tf_read_reg32(&(comm->pBuffer->status_s))
+ status = ((tf_read_reg32(&(comm->l1_buffer->status_s))
& TF_STATUS_POWER_STATE_MASK)
>> TF_STATUS_POWER_STATE_SHIFT);
diff --git a/security/smc/tf_comm.h b/security/smc/tf_comm.h
index 48bd934..8921dc1 100644
--- a/security/smc/tf_comm.h
+++ b/security/smc/tf_comm.h
@@ -154,9 +154,7 @@ int tf_fill_descriptor_table(
* Standard communication operations
*----------------------------------------------------------------------------*/
-#define STATUS_PENDING 0x00000001
-
-int tf_schedule_secure_world(struct tf_comm *comm, bool prepare_exit);
+int tf_schedule_secure_world(struct tf_comm *comm);
int tf_send_receive(
struct tf_comm *comm,
diff --git a/security/smc/tf_comm_mshield.c b/security/smc/tf_comm_mshield.c
index c36473e..aec5b80 100644
--- a/security/smc/tf_comm_mshield.c
+++ b/security/smc/tf_comm_mshield.c
@@ -1,5 +1,5 @@
/**
- * Copyright (c) 2010 Trusted Logic S.A.
+ * Copyright (c) 2011 Trusted Logic S.A.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -33,12 +33,14 @@
#include <asm/cacheflush.h>
+#include "s_version.h"
#include "tf_defs.h"
#include "tf_comm.h"
#include "tf_util.h"
#include "tf_conn.h"
#include "tf_zebra.h"
#include "tf_crypto.h"
+#include "mach/omap4-common.h"
/*--------------------------------------------------------------------------
* Internal constants
@@ -67,7 +69,6 @@
#define RPC_ADVANCEMENT_FINISHED 2
u32 g_RPC_advancement;
-u32 g_RPC_parameters[4] = {0, 0, 0, 0};
u32 g_secure_task_id;
u32 g_service_end;
@@ -80,6 +81,8 @@ u32 g_service_end;
#define API_HAL_TASK_MGR_RPCINIT_INDEX 0x08
#define API_HAL_KM_GETSECUREROMCODECRC_INDEX 0x0B
#define API_HAL_SEC_L3_RAM_RESIZE_INDEX 0x17
+#define API_HAL_HWATURNOFF_INDEX 0x29
+#define API_HAL_ACTIVATEHWAPWRMGRPATCH_INDEX 0x2A
#define API_HAL_RET_VALUE_OK 0x0
@@ -105,37 +108,18 @@ struct tf_ns_pa_info {
void *results;
};
-/*
- * AFY: I would like to remove the L0 buffer altogether:
- * - you can use the L1 shared buffer to pass the RPC parameters and results:
- * I think these easily fit in 256 bytes and you can use the area at
- * offset 0x2C0-0x3BF in the L1 shared buffer
- */
-struct tf_init_buffer {
- u32 init_status;
- u32 protocol_version;
- u32 l1_shared_buffer_descr;
- u32 backing_store_addr;
- u32 backext_storage_addr;
- u32 workspace_addr;
- u32 workspace_size;
- u32 properties_length;
- u8 properties_buffer[1];
-};
#ifdef CONFIG_HAS_WAKELOCK
static struct wake_lock g_tf_wake_lock;
-static u32 tf_wake_lock_count = 0;
#endif
static struct clockdomain *smc_l4_sec_clkdm;
-static u32 smc_l4_sec_clkdm_use_count = 0;
static int __init tf_early_init(void)
{
g_secure_task_id = 0;
- dprintk(KERN_INFO "SMC early init\n");
+ dpr_info("SMC early init\n");
smc_l4_sec_clkdm = clkdm_lookup("l4_secure_clkdm");
if (smc_l4_sec_clkdm == NULL)
@@ -148,7 +132,120 @@ static int __init tf_early_init(void)
return 0;
}
+#ifdef MODULE
+int __initdata (*tf_comm_early_init)(void) = &tf_early_init;
+#else
early_initcall(tf_early_init);
+#endif
+
+/*
+ * The timeout timer used to power off clocks
+ */
+#define INACTIVITY_TIMER_TIMEOUT 10 /* ms */
+
+static DEFINE_SPINLOCK(clk_timer_lock);
+static struct timer_list tf_crypto_clock_timer;
+static int tf_crypto_clock_enabled;
+
+void tf_clock_timer_init(void)
+{
+ init_timer(&tf_crypto_clock_timer);
+ tf_crypto_clock_enabled = 0;
+
+ /* HWA Clocks Patch init */
+ omap4_secure_dispatcher(API_HAL_ACTIVATEHWAPWRMGRPATCH_INDEX,
+ 0, 0, 0, 0, 0, 0);
+}
+
+u32 tf_try_disabling_secure_hwa_clocks(u32 mask)
+{
+ return omap4_secure_dispatcher(API_HAL_HWATURNOFF_INDEX,
+ FLAG_START_HAL_CRITICAL, 1, mask, 0, 0, 0);
+}
+
+static void tf_clock_timer_cb(unsigned long data)
+{
+ unsigned long flags;
+ u32 ret = 0;
+
+ dprintk(KERN_INFO "%s called...\n", __func__);
+
+ spin_lock_irqsave(&clk_timer_lock, flags);
+
+ /*
+ * If one of the HWA is used (by secure or public) the timer
+ * function cuts all the HWA clocks
+ */
+ if (tf_crypto_clock_enabled) {
+ dprintk(KERN_INFO "%s; tf_crypto_clock_enabled = %d\n",
+ __func__, tf_crypto_clock_enabled);
+ goto restart;
+ }
+
+ ret = tf_crypto_turn_off_clocks();
+
+ /*
+ * From MShield-DK 1.3.3 sources:
+ *
+ * Digest: 1 << 0
+ * DES : 1 << 1
+ * AES1 : 1 << 2
+ * AES2 : 1 << 3
+ */
+ if (ret & 0xf)
+ goto restart;
+
+ wake_unlock(&g_tf_wake_lock);
+ clkdm_allow_idle(smc_l4_sec_clkdm);
+
+ spin_unlock_irqrestore(&clk_timer_lock, flags);
+
+ dprintk(KERN_INFO "%s success\n", __func__);
+ return;
+
+restart:
+ dprintk("%s: will wait one more time ret=0x%x\n", __func__, ret);
+ mod_timer(&tf_crypto_clock_timer,
+ jiffies + msecs_to_jiffies(INACTIVITY_TIMER_TIMEOUT));
+
+ spin_unlock_irqrestore(&clk_timer_lock, flags);
+}
+
+void tf_clock_timer_start(void)
+{
+ unsigned long flags;
+ dprintk(KERN_INFO "%s\n", __func__);
+
+ spin_lock_irqsave(&clk_timer_lock, flags);
+
+ tf_crypto_clock_enabled++;
+
+ wake_lock(&g_tf_wake_lock);
+ clkdm_wakeup(smc_l4_sec_clkdm);
+
+ /* Stop the timer if already running */
+ if (timer_pending(&tf_crypto_clock_timer))
+ del_timer(&tf_crypto_clock_timer);
+
+ /* Configure the timer */
+ tf_crypto_clock_timer.expires =
+ jiffies + msecs_to_jiffies(INACTIVITY_TIMER_TIMEOUT);
+ tf_crypto_clock_timer.function = tf_clock_timer_cb;
+
+ add_timer(&tf_crypto_clock_timer);
+
+ spin_unlock_irqrestore(&clk_timer_lock, flags);
+}
+
+void tf_clock_timer_stop(void)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&clk_timer_lock, flags);
+ tf_crypto_clock_enabled--;
+ spin_unlock_irqrestore(&clk_timer_lock, flags);
+
+ dprintk(KERN_INFO "%s\n", __func__);
+}
/*
* Function responsible for formatting parameters to pass from NS world to
@@ -161,14 +258,12 @@ u32 omap4_secure_dispatcher(u32 app_id, u32 flags, u32 nargs,
unsigned long iflags;
u32 pub2sec_args[5] = {0, 0, 0, 0, 0};
- /*dprintk(KERN_INFO "omap4_secure_dispatcher: "
- "app_id=0x%08x, flags=0x%08x, nargs=%u\n",
- app_id, flags, nargs);*/
+ /*dpr_info("%s: app_id=0x%08x, flags=0x%08x, nargs=%u\n",
+ __func__, app_id, flags, nargs);*/
/*if (nargs != 0)
- dprintk(KERN_INFO
- "omap4_secure_dispatcher: args=%08x, %08x, %08x, %08x\n",
- arg1, arg2, arg3, arg4);*/
+ dpr_info("%s: args=%08x, %08x, %08x, %08x\n",
+ __func__, arg1, arg2, arg3, arg4);*/
pub2sec_args[0] = nargs;
pub2sec_args[1] = arg1;
@@ -186,26 +281,28 @@ u32 omap4_secure_dispatcher(u32 app_id, u32 flags, u32 nargs,
/*
* Put L4 Secure clock domain to SW_WKUP so that modules are accessible
*/
- tf_l4sec_clkdm_wakeup(false);
+ clkdm_wakeup(smc_l4_sec_clkdm);
local_irq_save(iflags);
-#ifdef DEBUG
- BUG_ON((read_mpidr() & 0x00000003) != 0);
-#endif
+
/* proc_id is always 0 */
ret = schedule_secure_world(app_id, 0, flags, __pa(pub2sec_args));
local_irq_restore(iflags);
/* Restore the HW_SUP on L4 Sec clock domain so hardware can idle */
- tf_l4sec_clkdm_allow_idle(false);
+ if ((app_id != API_HAL_HWATURNOFF_INDEX) &&
+ (!timer_pending(&tf_crypto_clock_timer))) {
+ (void) tf_crypto_turn_off_clocks();
+ clkdm_allow_idle(smc_l4_sec_clkdm);
+ }
- /*dprintk(KERN_INFO "omap4_secure_dispatcher()\n");*/
+ /*dpr_info("%s()\n", __func__);*/
return ret;
}
/* Yields the Secure World */
-int tf_schedule_secure_world(struct tf_comm *comm, bool prepare_exit)
+int tf_schedule_secure_world(struct tf_comm *comm)
{
int status = 0;
int ret;
@@ -220,41 +317,38 @@ int tf_schedule_secure_world(struct tf_comm *comm, bool prepare_exit)
case RPC_ADVANCEMENT_NONE:
/* Return from IRQ */
appli_id = SMICODEPUB_IRQ_END;
- if (prepare_exit)
- status = STATUS_PENDING;
break;
case RPC_ADVANCEMENT_PENDING:
/* nothing to do in this case */
goto exit;
default:
case RPC_ADVANCEMENT_FINISHED:
- if (prepare_exit)
- goto exit;
appli_id = SMICODEPUB_RPC_END;
g_RPC_advancement = RPC_ADVANCEMENT_NONE;
break;
}
+ tf_clock_timer_start();
+
g_service_end = 1;
/* yield to the Secure World */
ret = omap4_secure_dispatcher(appli_id, /* app_id */
0, 0, /* flags, nargs */
0, 0, 0, 0); /* arg1, arg2, arg3, arg4 */
if (g_service_end != 0) {
- dprintk(KERN_ERR "Service End ret=%X\n", ret);
+ dpr_err("Service End ret=%X\n", ret);
if (ret == 0) {
- dmac_flush_range((void *)comm->init_shared_buffer,
- (void *)(((u32)(comm->init_shared_buffer)) +
+ dmac_flush_range((void *)comm->l1_buffer,
+ (void *)(((u32)(comm->l1_buffer)) +
PAGE_SIZE));
- outer_inv_range(__pa(comm->init_shared_buffer),
- __pa(comm->init_shared_buffer) +
+ outer_inv_range(__pa(comm->l1_buffer),
+ __pa(comm->l1_buffer) +
PAGE_SIZE);
- ret = ((struct tf_init_buffer *)
- (comm->init_shared_buffer))->init_status;
+ ret = comm->l1_buffer->exit_code;
- dprintk(KERN_ERR "SMC PA failure ret=%X\n", ret);
+ dpr_err("SMC PA failure ret=%X\n", ret);
if (ret == 0)
ret = -EFAULT;
}
@@ -264,6 +358,8 @@ int tf_schedule_secure_world(struct tf_comm *comm, bool prepare_exit)
status = ret;
}
+ tf_clock_timer_stop();
+
exit:
local_irq_restore(iflags);
@@ -278,17 +374,17 @@ static int tf_se_init(struct tf_comm *comm,
unsigned int crc;
if (comm->se_initialized) {
- dprintk(KERN_INFO "tf_se_init: SE already initialized... "
- "nothing to do\n");
+ dpr_info("%s: SE already initialized... nothing to do\n",
+ __func__);
return 0;
}
/* Secure CRC read */
- dprintk(KERN_INFO "tf_se_init: Secure CRC Read...\n");
+ dpr_info("%s: Secure CRC Read...\n", __func__);
crc = omap4_secure_dispatcher(API_HAL_KM_GETSECUREROMCODECRC_INDEX,
0, 0, 0, 0, 0, 0);
- printk(KERN_INFO "SMC: SecureCRC=0x%08X\n", crc);
+ pr_info("SMC: SecureCRC=0x%08X\n", crc);
/*
* Flush caches before resize, just to be sure there is no
@@ -304,46 +400,45 @@ static int tf_se_init(struct tf_comm *comm,
wmb();
/* SRAM resize */
- dprintk(KERN_INFO "tf_se_init: SRAM resize (52KB)...\n");
+ dpr_info("%s: SRAM resize (52KB)...\n", __func__);
error = omap4_secure_dispatcher(API_HAL_SEC_L3_RAM_RESIZE_INDEX,
FLAG_FIQ_ENABLE | FLAG_START_HAL_CRITICAL, 1,
SEC_RAM_SIZE_52KB, 0, 0, 0);
if (error == API_HAL_RET_VALUE_OK) {
- dprintk(KERN_INFO "tf_se_init: SRAM resize OK\n");
+ dpr_info("%s: SRAM resize OK\n", __func__);
} else {
- dprintk(KERN_ERR "tf_se_init: "
- "SRAM resize failed [0x%x]\n", error);
+ dpr_err("%s: SRAM resize failed [0x%x]\n", __func__, error);
goto error;
}
/* SDP init */
- dprintk(KERN_INFO "tf_se_init: SDP runtime init..."
+ dpr_info("%s: SDP runtime init..."
"(sdp_backing_store_addr=%x, sdp_bkext_store_addr=%x)\n",
+ __func__,
sdp_backing_store_addr, sdp_bkext_store_addr);
error = omap4_secure_dispatcher(API_HAL_SDP_RUNTIMEINIT_INDEX,
FLAG_FIQ_ENABLE | FLAG_START_HAL_CRITICAL, 2,
sdp_backing_store_addr, sdp_bkext_store_addr, 0, 0);
if (error == API_HAL_RET_VALUE_OK) {
- dprintk(KERN_INFO "tf_se_init: SDP runtime init OK\n");
+ dpr_info("%s: SDP runtime init OK\n", __func__);
} else {
- dprintk(KERN_ERR "tf_se_init: "
- "SDP runtime init failed [0x%x]\n", error);
+ dpr_err("%s: SDP runtime init failed [0x%x]\n",
+ __func__, error);
goto error;
}
/* RPC init */
- dprintk(KERN_INFO "tf_se_init: RPC init...\n");
+ dpr_info("%s: RPC init...\n", __func__);
error = omap4_secure_dispatcher(API_HAL_TASK_MGR_RPCINIT_INDEX,
FLAG_START_HAL_CRITICAL, 1,
(u32) (u32(*const) (u32, u32, u32, u32)) &rpc_handler, 0, 0, 0);
if (error == API_HAL_RET_VALUE_OK) {
- dprintk(KERN_INFO "tf_se_init: RPC init OK\n");
+ dpr_info("%s: RPC init OK\n", __func__);
} else {
- dprintk(KERN_ERR "tf_se_init: "
- "RPC init failed [0x%x]\n", error);
+ dpr_err("%s: RPC init failed [0x%x]\n", __func__, error);
goto error;
}
@@ -361,21 +456,20 @@ static u32 tf_rpc_init(struct tf_comm *comm)
u32 protocol_version;
u32 rpc_error = RPC_SUCCESS;
- dprintk(KERN_INFO "tf_rpc_init(%p)\n", comm);
+ dpr_info("%s(%p)\n", __func__, comm);
spin_lock(&(comm->lock));
- dmac_flush_range((void *)comm->init_shared_buffer,
- (void *)(((u32)(comm->init_shared_buffer)) + PAGE_SIZE));
- outer_inv_range(__pa(comm->init_shared_buffer),
- __pa(comm->init_shared_buffer) + PAGE_SIZE);
+ dmac_flush_range((void *)comm->l1_buffer,
+ (void *)(((u32)(comm->l1_buffer)) + PAGE_SIZE));
+ outer_inv_range(__pa(comm->l1_buffer),
+ __pa(comm->l1_buffer) + PAGE_SIZE);
- protocol_version = ((struct tf_init_buffer *)
- (comm->init_shared_buffer))->protocol_version;
+ protocol_version = comm->l1_buffer->protocol_version;
if ((GET_PROTOCOL_MAJOR_VERSION(protocol_version))
!= TF_S_PROTOCOL_MAJOR_VERSION) {
- dprintk(KERN_ERR "SMC: Unsupported SMC Protocol PA Major "
+ dpr_err("SMC: Unsupported SMC Protocol PA Major "
"Version (0x%02x, expected 0x%02x)!\n",
GET_PROTOCOL_MAJOR_VERSION(protocol_version),
TF_S_PROTOCOL_MAJOR_VERSION);
@@ -386,20 +480,16 @@ static u32 tf_rpc_init(struct tf_comm *comm)
spin_unlock(&(comm->lock));
- register_smc_public_crypto_digest();
- register_smc_public_crypto_aes();
-
return rpc_error;
}
static u32 tf_rpc_trace(struct tf_comm *comm)
{
- dprintk(KERN_INFO "tf_rpc_trace(%p)\n", comm);
+ dpr_info("%s(%p)\n", __func__, comm);
#ifdef CONFIG_SECURE_TRACE
spin_lock(&(comm->lock));
- printk(KERN_INFO "SMC PA: %s",
- comm->pBuffer->rpc_trace_buffer);
+ pr_info("SMC PA: %s", comm->l1_buffer->rpc_trace_buffer);
spin_unlock(&(comm->lock));
#endif
return RPC_SUCCESS;
@@ -419,40 +509,39 @@ int tf_rpc_execute(struct tf_comm *comm)
u32 rpc_command;
u32 rpc_error = RPC_NO;
-#ifdef DEBUG
- BUG_ON((read_mpidr() & 0x00000003) != 0);
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+ BUG_ON((hard_smp_processor_id() & 0x00000003) != 0);
#endif
/* Lock the RPC */
mutex_lock(&(comm->rpc_mutex));
- rpc_command = g_RPC_parameters[1];
+ rpc_command = comm->l1_buffer->rpc_command;
if (g_RPC_advancement == RPC_ADVANCEMENT_PENDING) {
- dprintk(KERN_INFO "tf_rpc_execute: "
- "Executing CMD=0x%x\n",
- g_RPC_parameters[1]);
+ dpr_info("%s: Executing CMD=0x%x\n",
+ __func__, rpc_command);
switch (rpc_command) {
case RPC_CMD_YIELD:
- dprintk(KERN_INFO "tf_rpc_execute: "
- "RPC_CMD_YIELD\n");
+ dpr_info("%s: RPC_CMD_YIELD\n", __func__);
rpc_error = RPC_YIELD;
- g_RPC_parameters[0] = RPC_SUCCESS;
+ comm->l1_buffer->rpc_status = RPC_SUCCESS;
break;
case RPC_CMD_TRACE:
rpc_error = RPC_NON_YIELD;
- g_RPC_parameters[0] = tf_rpc_trace(comm);
+ comm->l1_buffer->rpc_status = tf_rpc_trace(comm);
break;
default:
if (tf_crypto_execute_rpc(rpc_command,
- comm->pBuffer->rpc_cus_buffer) != 0)
- g_RPC_parameters[0] = RPC_ERROR_BAD_PARAMETERS;
+ comm->l1_buffer->rpc_cus_buffer) != 0)
+ comm->l1_buffer->rpc_status =
+ RPC_ERROR_BAD_PARAMETERS;
else
- g_RPC_parameters[0] = RPC_SUCCESS;
+ comm->l1_buffer->rpc_status = RPC_SUCCESS;
rpc_error = RPC_NON_YIELD;
break;
}
@@ -461,50 +550,12 @@ int tf_rpc_execute(struct tf_comm *comm)
mutex_unlock(&(comm->rpc_mutex));
- dprintk(KERN_INFO "tf_rpc_execute: Return 0x%x\n",
- rpc_error);
+ dpr_info("%s: Return 0x%x\n", __func__, rpc_error);
return rpc_error;
}
/*--------------------------------------------------------------------------
- * L4 SEC Clock domain handling
- *-------------------------------------------------------------------------- */
-
-static DEFINE_SPINLOCK(clk_lock);
-void tf_l4sec_clkdm_wakeup(bool wakelock)
-{
- unsigned long flags;
- spin_lock_irqsave(&clk_lock, flags);
-#ifdef CONFIG_HAS_WAKELOCK
- if (wakelock) {
- tf_wake_lock_count++;
- wake_lock(&g_tf_wake_lock);
- }
-#endif
- smc_l4_sec_clkdm_use_count++;
- clkdm_wakeup(smc_l4_sec_clkdm);
- spin_unlock_irqrestore(&clk_lock, flags);
-}
-
-void tf_l4sec_clkdm_allow_idle(bool wakeunlock)
-{
- unsigned long flags;
- spin_lock_irqsave(&clk_lock, flags);
- smc_l4_sec_clkdm_use_count--;
- if (smc_l4_sec_clkdm_use_count == 0)
- clkdm_allow_idle(smc_l4_sec_clkdm);
-#ifdef CONFIG_HAS_WAKELOCK
- if (wakeunlock){
- tf_wake_lock_count--;
- if (tf_wake_lock_count == 0)
- wake_unlock(&g_tf_wake_lock);
- }
-#endif
- spin_unlock_irqrestore(&clk_lock, flags);
-}
-
-/*--------------------------------------------------------------------------
* Power management
*-------------------------------------------------------------------------- */
/*
@@ -520,7 +571,7 @@ int tf_pm_shutdown(struct tf_comm *comm)
union tf_command command;
union tf_answer answer;
- dprintk(KERN_INFO "tf_pm_shutdown()\n");
+ dpr_info("%s()\n", __func__);
memset(&command, 0, sizeof(command));
@@ -539,17 +590,16 @@ int tf_pm_shutdown(struct tf_comm *comm)
false);
if (error != 0) {
- dprintk(KERN_ERR "tf_pm_shutdown(): "
- "tf_send_receive failed (error %d)!\n",
- error);
+ dpr_err("%s(): tf_send_receive failed (error %d)!\n",
+ __func__, error);
return error;
}
#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
if (answer.header.error_code != 0)
- dprintk(KERN_ERR "tf_driver: shutdown failed.\n");
+ dpr_err("tf_driver: shutdown failed.\n");
else
- dprintk(KERN_INFO "tf_driver: shutdown succeeded.\n");
+ dpr_info("tf_driver: shutdown succeeded.\n");
#endif
return answer.header.error_code;
@@ -560,7 +610,7 @@ int tf_pm_hibernate(struct tf_comm *comm)
{
struct tf_device *dev = tf_get_device();
- dprintk(KERN_INFO "tf_pm_hibernate()\n");
+ dpr_info("%s()\n", __func__);
/*
* As we enter in CORE OFF, the keys are going to be cleared.
@@ -576,101 +626,11 @@ int tf_pm_hibernate(struct tf_comm *comm)
return 0;
}
-#ifdef CONFIG_SMC_KERNEL_CRYPTO
-#define DELAYED_RESUME_NONE 0
-#define DELAYED_RESUME_PENDING 1
-#define DELAYED_RESUME_ONGOING 2
-
-static DEFINE_SPINLOCK(tf_delayed_resume_lock);
-static int tf_need_delayed_resume = DELAYED_RESUME_NONE;
-
-int tf_delayed_secure_resume(void)
-{
- int ret;
- union tf_command message;
- union tf_answer answer;
- struct tf_device *dev = tf_get_device();
-
- spin_lock(&tf_delayed_resume_lock);
- if (likely(tf_need_delayed_resume == DELAYED_RESUME_NONE)) {
- spin_unlock(&tf_delayed_resume_lock);
- return 0;
- }
-
- if (unlikely(tf_need_delayed_resume == DELAYED_RESUME_ONGOING)) {
- spin_unlock(&tf_delayed_resume_lock);
-
- /*
- * Wait for the other caller to actually finish the delayed
- * resume operation
- */
- while (tf_need_delayed_resume != DELAYED_RESUME_NONE)
- cpu_relax();
-
- return 0;
- }
-
- tf_need_delayed_resume = DELAYED_RESUME_ONGOING;
- spin_unlock(&tf_delayed_resume_lock);
-
- /*
- * When the system leaves CORE OFF, HWA are configured as secure. We
- * need them as public for the Linux Crypto API.
- */
- memset(&message, 0, sizeof(message));
-
- message.header.message_type = TF_MESSAGE_TYPE_MANAGEMENT;
- message.header.message_size =
- (sizeof(struct tf_command_management) -
- sizeof(struct tf_command_header))/sizeof(u32);
- message.management.command =
- TF_MANAGEMENT_RESUME_FROM_CORE_OFF;
-
- ret = tf_send_receive(&dev->sm, &message, &answer, NULL, false);
- if (ret) {
- printk(KERN_ERR "tf_pm_resume(%p): "
- "tf_send_receive failed (error %d)!\n",
- &dev->sm, ret);
-
- unregister_smc_public_crypto_digest();
- unregister_smc_public_crypto_aes();
- return ret;
- }
-
- if (answer.header.error_code) {
- unregister_smc_public_crypto_digest();
- unregister_smc_public_crypto_aes();
- }
-
- spin_lock(&tf_delayed_resume_lock);
- tf_need_delayed_resume = DELAYED_RESUME_NONE;
- spin_unlock(&tf_delayed_resume_lock);
-
- return answer.header.error_code;
-}
-#endif
-
int tf_pm_resume(struct tf_comm *comm)
{
- dprintk(KERN_INFO "tf_pm_resume()\n");
- #if 0
- {
- void *workspace_va;
- struct tf_device *dev = tf_get_device();
- workspace_va = ioremap(dev->workspace_addr,
- dev->workspace_size);
- printk(KERN_INFO
- "Read first word of workspace [0x%x]\n",
- *(uint32_t *)workspace_va);
- }
- #endif
-
-#ifdef CONFIG_SMC_KERNEL_CRYPTO
- spin_lock(&tf_delayed_resume_lock);
- tf_need_delayed_resume = DELAYED_RESUME_PENDING;
- spin_unlock(&tf_delayed_resume_lock);
-#endif
+ dpr_info("%s()\n", __func__);
+ tf_aes_pm_resume();
return 0;
}
@@ -682,8 +642,7 @@ int tf_init(struct tf_comm *comm)
{
spin_lock_init(&(comm->lock));
comm->flags = 0;
- comm->pBuffer = NULL;
- comm->init_shared_buffer = NULL;
+ comm->l1_buffer = NULL;
comm->se_initialized = false;
@@ -693,10 +652,10 @@ int tf_init(struct tf_comm *comm)
if (tf_crypto_init() != PUBLIC_CRYPTO_OPERATION_SUCCESS)
return -EFAULT;
- if (omap_type() == OMAP2_DEVICE_TYPE_GP) {
- register_smc_public_crypto_digest();
- register_smc_public_crypto_aes();
- }
+ pr_info("%s\n", S_VERSION_STRING);
+
+ register_smc_public_crypto_digest();
+ register_smc_public_crypto_aes();
return 0;
}
@@ -705,11 +664,9 @@ int tf_init(struct tf_comm *comm)
int tf_start(struct tf_comm *comm,
u32 workspace_addr, u32 workspace_size,
u8 *pa_buffer, u32 pa_size,
- u8 *properties_buffer, u32 properties_length)
+ u32 conf_descriptor, u32 conf_offset, u32 conf_size)
{
- struct tf_init_buffer *init_shared_buffer = NULL;
struct tf_l1_shared_buffer *l1_shared_buffer = NULL;
- u32 l1_shared_buffer_descr;
struct tf_ns_pa_info pa_info;
int ret;
u32 descr;
@@ -725,37 +682,26 @@ int tf_start(struct tf_comm *comm,
sched_getaffinity(0, &saved_cpu_mask);
ret_affinity = sched_setaffinity(0, &local_cpu_mask);
if (ret_affinity != 0)
- dprintk(KERN_ERR "sched_setaffinity #1 -> 0x%lX", ret_affinity);
+ dpr_err("sched_setaffinity #1 -> 0x%lX", ret_affinity);
#endif
- tf_l4sec_clkdm_wakeup(true);
-
workspace_size -= SZ_1M;
sdp_backing_store_addr = workspace_addr + workspace_size;
workspace_size -= 0x20000;
sdp_bkext_store_addr = workspace_addr + workspace_size;
- /*
- * Implementation notes:
- *
- * 1/ The PA buffer (pa_buffer)is now owned by this function.
- * In case of error, it is responsible for releasing the buffer.
- *
- * 2/ The PA Info and PA Buffer will be freed through a RPC call
- * at the beginning of the PA entry in the SE.
- */
if (test_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags)) {
- dprintk(KERN_ERR "tf_start(%p): "
- "The SMC PA is already started\n", comm);
+ dpr_err("%s(%p): The SMC PA is already started\n",
+ __func__, comm);
ret = -EFAULT;
goto error1;
}
if (sizeof(struct tf_l1_shared_buffer) != PAGE_SIZE) {
- dprintk(KERN_ERR "tf_start(%p): "
- "The L1 structure size is incorrect!\n", comm);
+ dpr_err("%s(%p): The L1 structure size is incorrect!\n",
+ __func__, comm);
ret = -EFAULT;
goto error1;
}
@@ -763,31 +709,17 @@ int tf_start(struct tf_comm *comm,
ret = tf_se_init(comm, sdp_backing_store_addr,
sdp_bkext_store_addr);
if (ret != 0) {
- dprintk(KERN_ERR "tf_start(%p): "
- "SE initialization failed\n", comm);
+ dpr_err("%s(%p): SE initialization failed\n", __func__, comm);
goto error1;
}
- init_shared_buffer =
- (struct tf_init_buffer *)
- internal_get_zeroed_page(GFP_KERNEL);
- if (init_shared_buffer == NULL) {
- dprintk(KERN_ERR "tf_start(%p): "
- "Ouf of memory!\n", comm);
-
- ret = -ENOMEM;
- goto error1;
- }
- /* Ensure the page is mapped */
- __set_page_locked(virt_to_page(init_shared_buffer));
l1_shared_buffer =
(struct tf_l1_shared_buffer *)
internal_get_zeroed_page(GFP_KERNEL);
if (l1_shared_buffer == NULL) {
- dprintk(KERN_ERR "tf_start(%p): "
- "Ouf of memory!\n", comm);
+ dpr_err("%s(%p): Ouf of memory!\n", __func__, comm);
ret = -ENOMEM;
goto error1;
@@ -795,73 +727,56 @@ int tf_start(struct tf_comm *comm,
/* Ensure the page is mapped */
__set_page_locked(virt_to_page(l1_shared_buffer));
- dprintk(KERN_INFO "tf_start(%p): "
- "L0SharedBuffer={0x%08x, 0x%08x}\n", comm,
- (u32) init_shared_buffer, (u32) __pa(init_shared_buffer));
- dprintk(KERN_INFO "tf_start(%p): "
- "L1SharedBuffer={0x%08x, 0x%08x}\n", comm,
+ dpr_info("%s(%p): L1SharedBuffer={0x%08x, 0x%08x}\n",
+ __func__, comm,
(u32) l1_shared_buffer, (u32) __pa(l1_shared_buffer));
descr = tf_get_l2_descriptor_common((u32) l1_shared_buffer,
current->mm);
- l1_shared_buffer_descr = (
- ((u32) __pa(l1_shared_buffer) & 0xFFFFF000) |
- (descr & 0xFFF));
+ pa_info.certificate = (void *) workspace_addr;
+ pa_info.parameters = (void *) __pa(l1_shared_buffer);
+ pa_info.results = (void *) __pa(l1_shared_buffer);
- pa_info.certificate = (void *) __pa(pa_buffer);
- pa_info.parameters = (void *) __pa(init_shared_buffer);
- pa_info.results = (void *) __pa(init_shared_buffer);
+ l1_shared_buffer->l1_shared_buffer_descr = descr & 0xFFF;
- init_shared_buffer->l1_shared_buffer_descr = l1_shared_buffer_descr;
+ l1_shared_buffer->backing_store_addr = sdp_backing_store_addr;
+ l1_shared_buffer->backext_storage_addr = sdp_bkext_store_addr;
+ l1_shared_buffer->workspace_addr = workspace_addr;
+ l1_shared_buffer->workspace_size = workspace_size;
- init_shared_buffer->backing_store_addr = sdp_backing_store_addr;
- init_shared_buffer->backext_storage_addr = sdp_bkext_store_addr;
- init_shared_buffer->workspace_addr = workspace_addr;
- init_shared_buffer->workspace_size = workspace_size;
+ dpr_info("%s(%p): System Configuration (%d bytes)\n",
+ __func__, comm, conf_size);
+ dpr_info("%s(%p): Starting PA (%d bytes)...\n",
+ __func__, comm, pa_size);
- init_shared_buffer->properties_length = properties_length;
- if (properties_length == 0) {
- init_shared_buffer->properties_buffer[0] = 0;
- } else {
- /* Test for overflow */
- if ((init_shared_buffer->properties_buffer +
- properties_length
- > init_shared_buffer->properties_buffer) &&
- (properties_length <=
- init_shared_buffer->properties_length)) {
- memcpy(init_shared_buffer->properties_buffer,
- properties_buffer,
- properties_length);
- } else {
- dprintk(KERN_INFO "tf_start(%p): "
- "Configuration buffer size from userland is "
- "incorrect(%d, %d)\n",
- comm, (u32) properties_length,
- init_shared_buffer->properties_length);
- ret = -EFAULT;
- goto error1;
- }
- }
-
- dprintk(KERN_INFO "tf_start(%p): "
- "System Configuration (%d bytes)\n", comm,
- init_shared_buffer->properties_length);
- dprintk(KERN_INFO "tf_start(%p): "
- "Starting PA (%d bytes)...\n", comm, pa_size);
/*
* Make sure all data is visible to the secure world
*/
- dmac_flush_range((void *)init_shared_buffer,
- (void *)(((u32)init_shared_buffer) + PAGE_SIZE));
- outer_clean_range(__pa(init_shared_buffer),
- __pa(init_shared_buffer) + PAGE_SIZE);
-
- dmac_flush_range((void *)pa_buffer,
- (void *)(pa_buffer + pa_size));
- outer_clean_range(__pa(pa_buffer),
- __pa(pa_buffer) + pa_size);
+ dmac_flush_range((void *)l1_shared_buffer,
+ (void *)(((u32)l1_shared_buffer) + PAGE_SIZE));
+ outer_clean_range(__pa(l1_shared_buffer),
+ __pa(l1_shared_buffer) + PAGE_SIZE);
+
+ if (pa_size > workspace_size) {
+ dpr_err("%s(%p): PA size is incorrect (%x)\n",
+ __func__, comm, pa_size);
+ ret = -EFAULT;
+ goto error1;
+ }
+ {
+ void *tmp;
+ tmp = ioremap_nocache(workspace_addr, pa_size);
+ if (copy_from_user(tmp, pa_buffer, pa_size)) {
+ iounmap(tmp);
+ dpr_err("%s(%p): Cannot access PA buffer (%p)\n",
+ __func__, comm, (void *) pa_buffer);
+ ret = -EFAULT;
+ goto error1;
+ }
+ iounmap(tmp);
+ }
dmac_flush_range((void *)&pa_info,
(void *)(((u32)&pa_info) + sizeof(struct tf_ns_pa_info)));
outer_clean_range(__pa(&pa_info),
@@ -869,10 +784,11 @@ int tf_start(struct tf_comm *comm,
wmb();
spin_lock(&(comm->lock));
- comm->init_shared_buffer = init_shared_buffer;
- comm->pBuffer = l1_shared_buffer;
+ comm->l1_buffer = l1_shared_buffer;
+ comm->l1_buffer->conf_descriptor = conf_descriptor;
+ comm->l1_buffer->conf_offset = conf_offset;
+ comm->l1_buffer->conf_size = conf_size;
spin_unlock(&(comm->lock));
- init_shared_buffer = NULL;
l1_shared_buffer = NULL;
/*
@@ -882,8 +798,7 @@ int tf_start(struct tf_comm *comm,
tf_set_current_time(comm);
/* Workaround for issue #6081 */
- if ((omap_rev() && 0xFFF000FF) == OMAP443X_CLASS)
- disable_nonboot_cpus();
+ disable_nonboot_cpus();
/*
* Start the SMC PA
@@ -892,8 +807,7 @@ int tf_start(struct tf_comm *comm,
FLAG_IRQ_ENABLE | FLAG_FIQ_ENABLE | FLAG_START_HAL_CRITICAL, 1,
__pa(&pa_info), 0, 0, 0);
if (ret != API_HAL_RET_VALUE_OK) {
- printk(KERN_ERR "SMC: Error while loading the PA [0x%x]\n",
- ret);
+ pr_err("SMC: Error while loading the PA [0x%x]\n", ret);
goto error2;
}
@@ -902,31 +816,28 @@ loop:
mutex_lock(&(comm->rpc_mutex));
if (g_RPC_advancement == RPC_ADVANCEMENT_PENDING) {
- dprintk(KERN_INFO "tf_rpc_execute: "
- "Executing CMD=0x%x\n",
- g_RPC_parameters[1]);
+ dpr_info("%s: Executing CMD=0x%x\n",
+ __func__, comm->l1_buffer->rpc_command);
- switch (g_RPC_parameters[1]) {
+ switch (comm->l1_buffer->rpc_command) {
case RPC_CMD_YIELD:
- dprintk(KERN_INFO "tf_rpc_execute: "
- "RPC_CMD_YIELD\n");
+ dpr_info("%s: RPC_CMD_YIELD\n", __func__);
set_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED,
&(comm->flags));
- g_RPC_parameters[0] = RPC_SUCCESS;
+ comm->l1_buffer->rpc_status = RPC_SUCCESS;
break;
case RPC_CMD_INIT:
- dprintk(KERN_INFO "tf_rpc_execute: "
- "RPC_CMD_INIT\n");
- g_RPC_parameters[0] = tf_rpc_init(comm);
+ dpr_info("%s: RPC_CMD_INIT\n", __func__);
+ comm->l1_buffer->rpc_status = tf_rpc_init(comm);
break;
case RPC_CMD_TRACE:
- g_RPC_parameters[0] = tf_rpc_trace(comm);
+ comm->l1_buffer->rpc_status = tf_rpc_trace(comm);
break;
default:
- g_RPC_parameters[0] = RPC_ERROR_BAD_PARAMETERS;
+ comm->l1_buffer->rpc_status = RPC_ERROR_BAD_PARAMETERS;
break;
}
g_RPC_advancement = RPC_ADVANCEMENT_FINISHED;
@@ -934,10 +845,9 @@ loop:
mutex_unlock(&(comm->rpc_mutex));
- ret = tf_schedule_secure_world(comm, false);
+ ret = tf_schedule_secure_world(comm);
if (ret != 0) {
- printk(KERN_ERR "SMC: Error while loading the PA [0x%x]\n",
- ret);
+ pr_err("SMC: Error while loading the PA [0x%x]\n", ret);
goto error2;
}
@@ -948,39 +858,21 @@ loop:
wake_up(&(comm->wait_queue));
ret = 0;
- #if 0
- {
- void *workspace_va;
- workspace_va = ioremap(workspace_addr, workspace_size);
- printk(KERN_INFO
- "Read first word of workspace [0x%x]\n",
- *(uint32_t *)workspace_va);
- }
- #endif
-
/* Workaround for issue #6081 */
- if ((omap_rev() && 0xFFF000FF) == OMAP443X_CLASS)
- enable_nonboot_cpus();
+ enable_nonboot_cpus();
goto exit;
error2:
/* Workaround for issue #6081 */
- if ((omap_rev() && 0xFFF000FF) == OMAP443X_CLASS)
- enable_nonboot_cpus();
+ enable_nonboot_cpus();
spin_lock(&(comm->lock));
- l1_shared_buffer = comm->pBuffer;
- init_shared_buffer = comm->init_shared_buffer;
- comm->pBuffer = NULL;
- comm->init_shared_buffer = NULL;
+ l1_shared_buffer = comm->l1_buffer;
+ comm->l1_buffer = NULL;
spin_unlock(&(comm->lock));
error1:
- if (init_shared_buffer != NULL) {
- __clear_page_locked(virt_to_page(init_shared_buffer));
- internal_free_page((unsigned long) init_shared_buffer);
- }
if (l1_shared_buffer != NULL) {
__clear_page_locked(virt_to_page(l1_shared_buffer));
internal_free_page((unsigned long) l1_shared_buffer);
@@ -990,11 +882,9 @@ exit:
#ifdef CONFIG_SMP
ret_affinity = sched_setaffinity(0, &saved_cpu_mask);
if (ret_affinity != 0)
- dprintk(KERN_ERR "sched_setaffinity #2 -> 0x%lX", ret_affinity);
+ dpr_err("sched_setaffinity #2 -> 0x%lX", ret_affinity);
#endif
- tf_l4sec_clkdm_allow_idle(true);
-
if (ret > 0)
ret = -EFAULT;
@@ -1003,7 +893,7 @@ exit:
void tf_terminate(struct tf_comm *comm)
{
- dprintk(KERN_INFO "tf_terminate(%p)\n", comm);
+ dpr_info("%s(%p)\n", __func__, comm);
spin_lock(&(comm->lock));
diff --git a/security/smc/tf_conn.c b/security/smc/tf_conn.c
index ec8eeb7..943b95c 100644
--- a/security/smc/tf_conn.c
+++ b/security/smc/tf_conn.c
@@ -24,6 +24,7 @@
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
+#include <linux/stddef.h>
#include <linux/types.h>
#include "s_version.h"
@@ -38,6 +39,8 @@
#include "tf_crypto.h"
#endif
+#define TF_PRIVILEGED_UID_GID 1000 /* Android system AID */
+
/*----------------------------------------------------------------------------
* Management of the shared memory blocks.
*
@@ -48,7 +51,7 @@
/**
* Unmaps a shared memory
**/
-static void tf_unmap_shmem(
+void tf_unmap_shmem(
struct tf_connection *connection,
struct tf_shmem_desc *shmem_desc,
u32 full_cleanup)
@@ -106,7 +109,7 @@ retry:
* Update the buffer_start_offset and buffer_size fields
* shmem_desc is updated to the mapped shared memory descriptor
**/
-static int tf_map_shmem(
+int tf_map_shmem(
struct tf_connection *connection,
u32 buffer,
/* flags for read-write access rights on the memory */
@@ -190,7 +193,7 @@ static int tf_map_shmem(
error);
goto error;
}
- desc->pBuffer = (u8 *) buffer;
+ desc->client_buffer = (u8 *) buffer;
/*
* Successful completion.
@@ -275,7 +278,7 @@ struct vm_area_struct *tf_find_vma(struct mm_struct *mm,
return vma;
}
-static int tf_validate_shmem_and_flags(
+int tf_validate_shmem_and_flags(
u32 shmem,
u32 shmem_size,
u32 flags)
@@ -383,7 +386,7 @@ static int tf_map_temp_shmem(struct tf_connection *connection,
/* Map the temp shmem block */
u32 shared_mem_descriptors[TF_MAX_COARSE_PAGES];
- u32 descriptorCount;
+ u32 descriptor_count;
if (in_user_space) {
error = tf_validate_shmem_and_flags(
@@ -403,7 +406,7 @@ static int tf_map_temp_shmem(struct tf_connection *connection,
&(temp_memref->offset),
temp_memref->size,
shmem_desc,
- &descriptorCount);
+ &descriptor_count);
temp_memref->descriptor = shared_mem_descriptors[0];
}
@@ -690,13 +693,8 @@ int tf_open_client_session(
*/
*(u32 *) &command->open_client_session.login_data =
current_euid();
-#ifndef CONFIG_ANDROID
- command->open_client_session.login_type =
- (u32) TF_LOGIN_USER_LINUX_EUID;
-#else
command->open_client_session.login_type =
(u32) TF_LOGIN_USER_ANDROID_EUID;
-#endif
/* Added one word */
command->open_client_session.message_size += 1;
@@ -716,43 +714,13 @@ int tf_open_client_session(
error = -EACCES;
goto error;
}
-#ifndef CONFIG_ANDROID
- command->open_client_session.login_type =
- TF_LOGIN_GROUP_LINUX_GID;
-#else
command->open_client_session.login_type =
TF_LOGIN_GROUP_ANDROID_GID;
-#endif
command->open_client_session.message_size += 1; /* GID */
break;
}
-#ifndef CONFIG_ANDROID
- case TF_LOGIN_APPLICATION: {
- /*
- * Compute SHA-1 hash of the application fully-qualified path
- * name. Truncate the hash to 16 bytes and send it as login
- * data. Update message size.
- */
- u8 pSHA1Hash[SHA1_DIGEST_SIZE];
-
- error = tf_hash_application_path_and_data(pSHA1Hash,
- NULL, 0);
- if (error != 0) {
- dprintk(KERN_ERR "tf_open_client_session: "
- "error in tf_hash_application_path_and_data\n");
- goto error;
- }
- memcpy(&command->open_client_session.login_data,
- pSHA1Hash, 16);
- command->open_client_session.login_type =
- TF_LOGIN_APPLICATION_LINUX_PATH_SHA1_HASH;
- /* 16 bytes */
- command->open_client_session.message_size += 4;
- break;
- }
-#else
case TF_LOGIN_APPLICATION:
/*
* Send the real UID of the calling application in the login
@@ -767,36 +735,7 @@ int tf_open_client_session(
/* Added one word */
command->open_client_session.message_size += 1;
break;
-#endif
-#ifndef CONFIG_ANDROID
- case TF_LOGIN_APPLICATION_USER: {
- /*
- * Compute SHA-1 hash of the concatenation of the application
- * fully-qualified path name and the EUID of the calling
- * application. Truncate the hash to 16 bytes and send it as
- * login data. Update message size.
- */
- u8 pSHA1Hash[SHA1_DIGEST_SIZE];
-
- error = tf_hash_application_path_and_data(pSHA1Hash,
- (u8 *) &(current_euid()), sizeof(current_euid()));
- if (error != 0) {
- dprintk(KERN_ERR "tf_open_client_session: "
- "error in tf_hash_application_path_and_data\n");
- goto error;
- }
- memcpy(&command->open_client_session.login_data,
- pSHA1Hash, 16);
- command->open_client_session.login_type =
- TF_LOGIN_APPLICATION_USER_LINUX_PATH_EUID_SHA1_HASH;
-
- /* 16 bytes */
- command->open_client_session.message_size += 4;
-
- break;
- }
-#else
case TF_LOGIN_APPLICATION_USER:
/*
* Send the real UID and the EUID of the calling application in
@@ -813,49 +752,7 @@ int tf_open_client_session(
/* Added two words */
command->open_client_session.message_size += 2;
break;
-#endif
-#ifndef CONFIG_ANDROID
- case TF_LOGIN_APPLICATION_GROUP: {
- /*
- * Check requested GID. Compute SHA-1 hash of the concatenation
- * of the application fully-qualified path name and the
- * requested GID. Update message size
- */
- gid_t requested_gid;
- u8 pSHA1Hash[SHA1_DIGEST_SIZE];
-
- requested_gid = *(u32 *) &command->open_client_session.
- login_data;
-
- if (!tf_check_gid(requested_gid)) {
- dprintk(KERN_ERR "tf_open_client_session(%p) "
- "TF_LOGIN_APPLICATION_GROUP: requested GID (0x%x) "
- "does not match real eGID (0x%x)"
- "or any of the supplementary GIDs\n",
- connection, requested_gid, current_egid());
- error = -EACCES;
- goto error;
- }
-
- error = tf_hash_application_path_and_data(pSHA1Hash,
- &requested_gid, sizeof(u32));
- if (error != 0) {
- dprintk(KERN_ERR "tf_open_client_session: "
- "error in tf_hash_application_path_and_data\n");
- goto error;
- }
-
- memcpy(&command->open_client_session.login_data,
- pSHA1Hash, 16);
- command->open_client_session.login_type =
- TF_LOGIN_APPLICATION_GROUP_LINUX_PATH_GID_SHA1_HASH;
-
- /* 16 bytes */
- command->open_client_session.message_size += 4;
- break;
- }
-#else
case TF_LOGIN_APPLICATION_GROUP: {
/*
* Check requested GID. Send the real UID and the requested GID
@@ -889,18 +786,17 @@ int tf_open_client_session(
break;
}
-#endif
case TF_LOGIN_PRIVILEGED:
/* A privileged login may be performed only on behalf of the
kernel itself or on behalf of a process with euid=0 or
- egid=0. */
+ egid=0 or euid=system or egid=system. */
if (connection->owner == TF_CONNECTION_OWNER_KERNEL) {
dprintk(KERN_DEBUG "tf_open_client_session: "
"TF_LOGIN_PRIVILEGED for kernel API\n");
- command->open_client_session.login_type =
- TF_LOGIN_PRIVILEGED_KERNEL;
- } else if (current_euid() != 0 && current_egid() != 0) {
+ } else if ((current_euid() != TF_PRIVILEGED_UID_GID) &&
+ (current_egid() != TF_PRIVILEGED_UID_GID) &&
+ (current_euid() != 0) && (current_egid() != 0)) {
dprintk(KERN_ERR "tf_open_client_session: "
" user %d, group %d not allowed to open "
"session with TF_LOGIN_PRIVILEGED\n",
@@ -911,9 +807,9 @@ int tf_open_client_session(
dprintk(KERN_DEBUG "tf_open_client_session: "
"TF_LOGIN_PRIVILEGED for %u:%u\n",
current_euid(), current_egid());
- command->open_client_session.login_type =
- TF_LOGIN_PRIVILEGED;
}
+ command->open_client_session.login_type =
+ TF_LOGIN_PRIVILEGED;
break;
case TF_LOGIN_AUTHENTICATION: {
@@ -1088,7 +984,8 @@ int tf_register_shared_memory(
/* Initialize message_size with no descriptors */
msg->message_size
- = (sizeof(struct tf_command_register_shared_memory) -
+ = (offsetof(struct tf_command_register_shared_memory,
+ shared_mem_descriptors) -
sizeof(struct tf_command_header)) / 4;
/* Map the shmem block and update the message */
@@ -1096,7 +993,7 @@ int tf_register_shared_memory(
/* Empty shared mem */
msg->shared_mem_start_offset = msg->shared_mem_descriptors[0];
} else {
- u32 descriptorCount;
+ u32 descriptor_count;
error = tf_map_shmem(
connection,
msg->shared_mem_descriptors[0],
@@ -1106,13 +1003,13 @@ int tf_register_shared_memory(
&(msg->shared_mem_start_offset),
msg->shared_mem_size,
&shmem_desc,
- &descriptorCount);
+ &descriptor_count);
if (error != 0) {
dprintk(KERN_ERR "tf_register_shared_memory: "
"unable to map shared memory block\n");
goto error;
}
- msg->message_size += descriptorCount;
+ msg->message_size += descriptor_count;
}
/*
@@ -1230,6 +1127,7 @@ error:
#ifdef CONFIG_TF_ION
extern struct ion_device *omap_ion_device;
#endif /* CONFIG_TF_ION */
+
/*
* Invokes a client command to the Secure World
*/
@@ -1263,7 +1161,6 @@ int tf_invoke_client_command(
for (i = 0; i < 4; i++) {
int param_type = TF_GET_PARAM_TYPE(
command->invoke_client_command.param_types, i);
-
if ((param_type & (TF_PARAM_TYPE_MEMREF_FLAG |
TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG))
== TF_PARAM_TYPE_MEMREF_FLAG) {
@@ -1365,6 +1262,7 @@ error:
if (new_handle != NULL)
ion_free(connection->ion_client, new_handle);
#endif /* CONFIG_TF_ION */
+
/* Unmap de temp mem refs */
for (i = 0; i < 4; i++) {
if (shmem_desc[i] != NULL) {
diff --git a/security/smc/tf_conn.h b/security/smc/tf_conn.h
index d2c8261..35d2a2b 100644
--- a/security/smc/tf_conn.h
+++ b/security/smc/tf_conn.h
@@ -32,6 +32,24 @@ static inline struct tf_connection *tf_conn_from_file(
return file->private_data;
}
+int tf_validate_shmem_and_flags(u32 shmem, u32 shmem_size, u32 flags);
+
+int tf_map_shmem(
+ struct tf_connection *connection,
+ u32 buffer,
+ /* flags for read-write access rights on the memory */
+ u32 flags,
+ bool in_user_space,
+ u32 descriptors[TF_MAX_COARSE_PAGES],
+ u32 *buffer_start_offset,
+ u32 buffer_size,
+ struct tf_shmem_desc **shmem_desc,
+ u32 *descriptor_count);
+
+void tf_unmap_shmem(
+ struct tf_connection *connection,
+ struct tf_shmem_desc *shmem_desc,
+ u32 full_cleanup);
/*----------------------------------------------------------------------------
* Connection operations to the Secure World
*----------------------------------------------------------------------------*/
diff --git a/security/smc/tf_crypto.c b/security/smc/tf_crypto.c
index 7edca0f..baed463 100644
--- a/security/smc/tf_crypto.c
+++ b/security/smc/tf_crypto.c
@@ -206,67 +206,6 @@ static struct tf_shmem_desc *tf_get_shmem_from_block_handle(
}
/*------------------------------------------------------------------------- */
-/*
- * HWA public lock or unlock one HWA according algo specified by hwa_id
- */
-void tf_crypto_lock_hwa(u32 hwa_id, bool do_lock)
-{
- struct semaphore *s = NULL;
- struct tf_device *dev = tf_get_device();
-
- dprintk(KERN_INFO "[pid=%d] %s: hwa_id=0x%04X do_lock=%d\n",
- current->pid, __func__, hwa_id, do_lock);
-
- switch (hwa_id) {
- case RPC_AES1_CODE:
- s = &dev->aes1_sema;
- break;
- case RPC_DES_CODE:
- s = &dev->des_sema;
- break;
- default:
- case RPC_SHA_CODE:
- s = &dev->sha_sema;
- break;
- }
-
- if (do_lock == LOCK_HWA) {
- dprintk(KERN_INFO "tf_crypto_lock_hwa: "
- "Wait for HWAID=0x%04X\n", hwa_id);
- while (down_trylock(s))
- cpu_relax();
- dprintk(KERN_INFO "tf_crypto_lock_hwa: "
- "Locked on HWAID=0x%04X\n", hwa_id);
- } else {
- up(s);
- dprintk(KERN_INFO "tf_crypto_lock_hwa: "
- "Released for HWAID=0x%04X\n", hwa_id);
- }
-}
-
-/*------------------------------------------------------------------------- */
-/*
- * HWAs public lock or unlock HWA's specified in the HWA H/A/D fields of RPC
- * command rpc_command
- */
-static void tf_crypto_lock_hwas(u32 rpc_command, bool do_lock)
-{
- dprintk(KERN_INFO
- "tf_crypto_lock_hwas: rpc_command=0x%08x do_lock=%d\n",
- rpc_command, do_lock);
-
- /* perform the locks */
- if (rpc_command & RPC_AES1_CODE)
- tf_crypto_lock_hwa(RPC_AES1_CODE, do_lock);
-
- if (rpc_command & RPC_DES_CODE)
- tf_crypto_lock_hwa(RPC_DES_CODE, do_lock);
-
- if (rpc_command & RPC_SHA_CODE)
- tf_crypto_lock_hwa(RPC_SHA_CODE, do_lock);
-}
-
-/*------------------------------------------------------------------------- */
/**
*Initialize the public crypto DMA channels, global HWA semaphores and handles
*/
@@ -388,7 +327,8 @@ static bool tf_crypto_update(
status = tf_digest_update(
&cus->operation_state.sha,
params->input_data,
- params->input_data_length);
+ params->input_data_length,
+ TF_BUFFER_USER);
tf_crypto_disable_clock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG);
break;
@@ -622,7 +562,7 @@ static bool tf_crypto_parse_command_message(struct tf_connection *connection,
return false;
atomic_inc(&input_shmem->ref_count);
- input_data = input_shmem->pBuffer +
+ input_data = input_shmem->client_buffer +
command->params[0].memref.offset;
input_data_length = command->params[0].memref.size;
@@ -658,7 +598,7 @@ static bool tf_crypto_parse_command_message(struct tf_connection *connection,
goto err0;
atomic_inc(&output_shmem->ref_count);
- output_data = output_shmem->pBuffer +
+ output_data = output_shmem->client_buffer +
command->params[1].memref.offset;
output_data_length = command->params[1].memref.size;
@@ -773,11 +713,6 @@ int tf_crypto_try_shortcuted_update(struct tf_connection *connection,
if (tf_crypto_is_shortcuted_command(connection,
(struct tf_command_invoke_client_command *) command,
&cus, false)) {
- u32 hwa_id = cus->hwa_id;
-
- /* Lock HWA */
- tf_crypto_lock_hwa(hwa_id, LOCK_HWA);
-
if (tf_crypto_is_shortcuted_command(connection,
command,
&cus, true)) {
@@ -793,10 +728,6 @@ int tf_crypto_try_shortcuted_update(struct tf_connection *connection,
/* Decrement CUS context use count */
cus->use_count--;
- /* Release HWA lock */
- tf_crypto_lock_hwa(cus->hwa_id,
- UNLOCK_HWA);
-
return -1;
}
@@ -805,10 +736,6 @@ int tf_crypto_try_shortcuted_update(struct tf_connection *connection,
/* Decrement CUS context use count */
cus->use_count--;
- /* Release HWA lock */
- tf_crypto_lock_hwa(cus->hwa_id,
- UNLOCK_HWA);
-
return -1;
}
@@ -825,10 +752,7 @@ int tf_crypto_try_shortcuted_update(struct tf_connection *connection,
/* Decrement CUS context use count */
cus->use_count--;
- tf_crypto_lock_hwa(cus->hwa_id,
- UNLOCK_HWA);
} else {
- tf_crypto_lock_hwa(hwa_id, UNLOCK_HWA);
return -1;
}
} else {
@@ -865,32 +789,30 @@ u32 tf_crypto_wait_for_ready_bit(u32 *reg, u32 bit)
static DEFINE_SPINLOCK(clk_lock);
-void tf_crypto_disable_clock(uint32_t clock_paddr)
+u32 tf_crypto_turn_off_clocks(void)
{
- u32 *clock_reg;
- u32 val;
unsigned long flags;
+ u32 ret = 0xf;
- dprintk(KERN_INFO "tf_crypto_disable_clock: " \
- "clock_paddr=0x%08X\n",
- clock_paddr);
-
- /* Ensure none concurrent access when changing clock registers */
spin_lock_irqsave(&clk_lock, flags);
- clock_reg = (u32 *)IO_ADDRESS(clock_paddr);
+ ret = tf_try_disabling_secure_hwa_clocks(0xf) & 0xff;
- val = __raw_readl(clock_reg);
- val &= ~(0x3);
- __raw_writel(val, clock_reg);
+ spin_unlock_irqrestore(&clk_lock, flags);
- /* Wait for clock to be fully disabled */
- while ((__raw_readl(clock_reg) & 0x30000) == 0)
- ;
+ if (ret == 0xff)
+ panic("Error calling API_HAL_HWATURNOFF_INDEX");
- spin_unlock_irqrestore(&clk_lock, flags);
+ return ret;
+}
- tf_l4sec_clkdm_allow_idle(true);
+void tf_crypto_disable_clock(uint32_t clock_paddr)
+{
+ dprintk(KERN_INFO "tf_crypto_disable_clock: " \
+ "clock_paddr=0x%08X\n",
+ clock_paddr);
+
+ tf_clock_timer_stop();
}
/*------------------------------------------------------------------------- */
@@ -905,14 +827,18 @@ void tf_crypto_enable_clock(uint32_t clock_paddr)
"clock_paddr=0x%08X\n",
clock_paddr);
- tf_l4sec_clkdm_wakeup(true);
-
/* Ensure none concurrent access when changing clock registers */
spin_lock_irqsave(&clk_lock, flags);
+ tf_clock_timer_start();
+
clock_reg = (u32 *)IO_ADDRESS(clock_paddr);
val = __raw_readl(clock_reg);
+
+ if ((val & 0x30000) == 0)
+ goto end;
+
val |= 0x2;
__raw_writel(val, clock_reg);
@@ -920,6 +846,7 @@ void tf_crypto_enable_clock(uint32_t clock_paddr)
while ((__raw_readl(clock_reg) & 0x30000) != 0)
;
+end:
spin_unlock_irqrestore(&clk_lock, flags);
}
@@ -1013,7 +940,7 @@ static int tf_crypto_install_shortcut_lock_hwa(
*/
if ((rpc_command & RPC_INSTALL_SHORTCUT_LOCK_ACCELERATOR_LOCK) != 0) {
/*Lock the HWA */
- tf_crypto_lock_hwa(cus->hwa_id, LOCK_HWA);
+ /*tf_crypto_lock_hwa(cus->hwa_id, LOCK_HWA);*/
}
dprintk(KERN_INFO
@@ -1055,9 +982,6 @@ static int tf_crypto_lock_hwas_suspend_shortcut(
target_shortcut = suspend_cus_in->shortcut_id;
- /*lock HWAs */
- tf_crypto_lock_hwas(rpc_command, LOCK_HWA);
-
/*if suspend_cus_in->shortcut_id != 0 and if rpc_command.S != 0,
then, suspend shortcut */
if ((target_shortcut != 0) && ((rpc_command &
@@ -1206,9 +1130,6 @@ static int tf_crypto_resume_shortcut_unlock_hwas(
/* H is never set by the PA: Atomically set sham1_is_public to true */
dev->sham1_is_public = true;
- /* Unlock HWAs according rpc_command */
- tf_crypto_lock_hwas(rpc_command, UNLOCK_HWA);
-
return 0;
}
diff --git a/security/smc/tf_crypto.h b/security/smc/tf_crypto.h
index 2291439..797b082 100644
--- a/security/smc/tf_crypto.h
+++ b/security/smc/tf_crypto.h
@@ -26,6 +26,10 @@
#include <clockdomain.h>
+#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
+#include <linux/sysdev.h>
+#endif
+
#ifdef __ASM_ARM_ARCH_OMAP_CLOCKDOMAIN_H
#define clkdm_wakeup omap2_clkdm_wakeup
#define clkdm_allow_idle omap2_clkdm_allow_idle
@@ -253,12 +257,11 @@ void tf_crypto_wait_for_ready_bit_infinitely(u32 *reg, u32 bit);
void tf_crypto_enable_clock(uint32_t clock_paddr);
void tf_crypto_disable_clock(uint32_t clock_paddr);
+u32 tf_crypto_turn_off_clocks(void);
#define LOCK_HWA true
#define UNLOCK_HWA false
-void tf_crypto_lock_hwa(u32 hwa_id, bool do_lock);
-
/*---------------------------------------------------------------------------*/
/* AES operations */
/*---------------------------------------------------------------------------*/
@@ -341,9 +344,55 @@ static inline void unregister_smc_public_crypto_digest(void) {}
*sha_state: State of the operation
*data: Input buffer to process
*data_length: Length in bytes of the input buffer.
+ *buffer_origin: TF_BUFFER_USER or TF_BUFFER_KERNEL
*/
bool tf_digest_update(
struct tf_crypto_sha_operation_state *sha_state,
- u8 *data, u32 data_length);
+ u8 *data, u32 data_length, unsigned int buffer_origin);
+
+void tf_aes_pm_resume(void);
+
+#define TF_BUFFER_USER 0
+#define TF_BUFFER_KERNEL 1
+
+#define TF_CRYPTO_ALG_MD5 0x00000001
+#define TF_CRYPTO_ALG_SHA1 0x00000002
+#define TF_CRYPTO_ALG_SHA224 0x00000004
+#define TF_CRYPTO_ALG_SHA256 0x00000008
+#define TF_CRYPTO_ALG_AES_ECB_128 0x00000100
+#define TF_CRYPTO_ALG_AES_ECB_192 0x00000200
+#define TF_CRYPTO_ALG_AES_ECB_256 0x00000400
+#define TF_CRYPTO_ALG_AES_CBC_128 0x00000800
+#define TF_CRYPTO_ALG_AES_CBC_192 0x00001000
+#define TF_CRYPTO_ALG_AES_CBC_256 0x00002000
+#define TF_CRYPTO_ALG_AES_CTR_128 0x00004000
+#define TF_CRYPTO_ALG_AES_CTR_192 0x00008000
+#define TF_CRYPTO_ALG_AES_CTR_256 0x00010000
+#define TF_CRYPTO_ALG_HMAC_MD5 0x00100000
+#define TF_CRYPTO_ALG_HMAC_SHA1 0x00200000
+#define TF_CRYPTO_ALG_HMAC_SHA224 0x00400000
+#define TF_CRYPTO_ALG_HMAC_SHA256 0x00800000
+#define TF_CRYPTO_ALG_HMAC_ALL 0x00f00000
+
+#ifdef CONFIG_TF_DRIVER_FAULT_INJECTION
+extern unsigned tf_fault_injection_mask;
+#endif
+
+#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
+
+int __init tf_crypto_hmac_module_init(void);
+void __exit tf_crypto_hmac_module_exit(void);
+
+int __init tf_self_test_register_device(void);
+void __exit tf_self_test_unregister_device(void);
+
+int tf_self_test_post_init(struct kobject *parent);
+void tf_self_test_post_exit(void);
+unsigned tf_self_test_post_vectors(void);
+
+#define TF_CRYPTO_ALG_INTEGRITY 0x04000000
+extern char *tf_integrity_hmac_sha256_expected_value;
+
+#endif /* CONFIG_TF_DRIVER_CRYPTO_FIPS */
#endif /*__TF_PUBLIC_CRYPTO_H */
diff --git a/security/smc/tf_crypto_aes.c b/security/smc/tf_crypto_aes.c
index 36dc522..07c9cdbb 100644
--- a/security/smc/tf_crypto_aes.c
+++ b/security/smc/tf_crypto_aes.c
@@ -129,6 +129,10 @@ struct aes_reg {
static struct aes_reg *paes_reg;
#ifdef CONFIG_SMC_KERNEL_CRYPTO
+#define FLAGS_MODE_MASK 0x000f
+#define FLAGS_ENCRYPT BIT(0)
+#define FLAGS_DECRYPT 0
+
#define FLAGS_FAST BIT(7)
#define FLAGS_BUSY 8
@@ -141,6 +145,9 @@ struct aes_hwa_ctx {
struct tasklet_struct task;
struct ablkcipher_request *req;
+ struct crypto_async_request *next_req;
+ struct crypto_async_request *backlog;
+
size_t total;
struct scatterlist *in_sg;
size_t in_offset;
@@ -161,13 +168,20 @@ struct aes_hwa_ctx {
struct tf_crypto_aes_operation_state *ctx;
};
static struct aes_hwa_ctx *aes_ctx;
+
+struct aes_reqctx {
+ u32 mode;
+};
#endif
/*---------------------------------------------------------------------------
*Forward declarations
*------------------------------------------------------------------------- */
-static bool tf_aes_update_dma(u8 *src, u8 *dest, u32 nb_blocks, bool is_kernel);
+static bool tf_aes_update_dma(u8 *src, u8 *dest, u32 nb_blocks,
+ u32 ctrl, bool is_kernel);
+
+static bool reconfigure_dma;
/*----------------------------------------------------------------------------
*Save HWA registers into the specified operation state structure
@@ -192,9 +206,10 @@ static void tf_aes_save_registers(
*Restore the HWA registers from the operation state structure
*---------------------------------------------------------------------------*/
static void tf_aes_restore_registers(
- struct tf_crypto_aes_operation_state *aes_state)
+ struct tf_crypto_aes_operation_state *aes_state, int encrypt)
{
struct tf_device *dev = tf_get_device();
+ u32 CTRL = aes_state->CTRL;
dprintk(KERN_INFO "tf_aes_restore_registers: "
"paes_reg(%p) <- aes_state(%p): CTRL=0x%08x\n",
@@ -216,15 +231,20 @@ static void tf_aes_restore_registers(
* public crypto operations.
*/
dev->aes1_key_context = 0;
+
+ if (encrypt)
+ CTRL |= AES_CTRL_DIRECTION_ENCRYPT;
+ else
+ CTRL = CTRL & ~AES_CTRL_DIRECTION_ENCRYPT;
} else {
- aes_state->CTRL |= INREG32(&paes_reg->AES_CTRL);
+ CTRL |= INREG32(&paes_reg->AES_CTRL);
}
/*
* Restore the IV first if we are in CBC or CTR mode
* (not required for ECB)
*/
- if (!AES_CTRL_IS_MODE_ECB(aes_state->CTRL)) {
+ if (!AES_CTRL_IS_MODE_ECB(CTRL)) {
OUTREG32(&paes_reg->AES_IV_IN_0, aes_state->AES_IV_0);
OUTREG32(&paes_reg->AES_IV_IN_1, aes_state->AES_IV_1);
OUTREG32(&paes_reg->AES_IV_IN_2, aes_state->AES_IV_2);
@@ -236,15 +256,14 @@ static void tf_aes_restore_registers(
* it leads to break the HWA process (observed by experimentation)
*/
- aes_state->CTRL = (aes_state->CTRL & (3 << 3)) /* key size */
- | (aes_state->CTRL & ((1 << 2) | (1 << 5) | (1 << 6)))
+ CTRL = (CTRL & (3 << 3)) /* key size */
+ | (CTRL & ((1 << 2) | (1 << 5) | (1 << 6)))
| (0x3 << 7) /* Always set CTR_WIDTH to 128-bit */;
- if ((aes_state->CTRL & 0x1FC) !=
- (INREG32(&paes_reg->AES_CTRL) & 0x1FC))
- OUTREG32(&paes_reg->AES_CTRL, aes_state->CTRL & 0x1FC);
+ if ((CTRL & 0x1FC) != (INREG32(&paes_reg->AES_CTRL) & 0x1FC))
+ OUTREG32(&paes_reg->AES_CTRL, CTRL & 0x1FC);
- /* Set the SYSCONFIG register to 0 */
+ /* Reset the SYSCONFIG register */
OUTREG32(&paes_reg->AES_SYSCONFIG, 0);
}
@@ -262,6 +281,36 @@ void tf_aes_exit(void)
omap_iounmap(paes_reg);
}
+#ifdef CONFIG_TF_DRIVER_FAULT_INJECTION
+#define FAULTY(mask, ctrl, chaining, key_size) \
+ (((mask) & TF_CRYPTO_ALG_AES_##chaining##_##key_size) && \
+ AES_CTRL_IS_MODE_##chaining((ctrl)) && \
+ AES_CTRL_GET_KEY_SIZE((ctrl)) == AES_CTRL_KEY_SIZE_##key_size)
+static void tf_aes_fault_injection(u32 ctrl, u8 *buf)
+{
+ if (FAULTY(tf_fault_injection_mask, ctrl, ECB, 128) ||
+ FAULTY(tf_fault_injection_mask, ctrl, ECB, 192) ||
+ FAULTY(tf_fault_injection_mask, ctrl, ECB, 256) ||
+ FAULTY(tf_fault_injection_mask, ctrl, CBC, 128) ||
+ FAULTY(tf_fault_injection_mask, ctrl, CBC, 192) ||
+ FAULTY(tf_fault_injection_mask, ctrl, CBC, 256) ||
+ FAULTY(tf_fault_injection_mask, ctrl, CTR, 128) ||
+ FAULTY(tf_fault_injection_mask, ctrl, CTR, 192) ||
+ FAULTY(tf_fault_injection_mask, ctrl, CTR, 256)) {
+ pr_warning("TF: injecting fault in AES!\n");
+ buf[0] = 0xff;
+ buf[1] ^= 0xff;
+ } else {
+ dprintk(KERN_INFO "%s: no fault "
+ "(mask=0x%x ctrl=0x%x mode=%u key_size=%u)\n",
+ __func__, tf_fault_injection_mask,
+ ctrl, AES_CTRL_GET_MODE(ctrl),
+ AES_CTRL_GET_KEY_SIZE(ctrl));
+ }
+}
+#undef FAULTY
+#endif
+
bool tf_aes_update(struct tf_crypto_aes_operation_state *aes_state,
u8 *src, u8 *dest, u32 nb_blocks)
{
@@ -301,11 +350,12 @@ bool tf_aes_update(struct tf_crypto_aes_operation_state *aes_state,
}
/*Restore the registers of the accelerator from the operation state */
- tf_aes_restore_registers(aes_state);
+ tf_aes_restore_registers(aes_state, 0);
if (dma_use == PUBLIC_CRYPTO_DMA_USE_IRQ) {
/* Perform the update with DMA */
- if (!tf_aes_update_dma(src, dest, nb_blocks, is_kernel))
+ if (!tf_aes_update_dma(src, dest, nb_blocks,
+ aes_state->CTRL, is_kernel))
return false;
} else {
u8 buf[DMA_TRIGGER_IRQ_AES];
@@ -373,6 +423,9 @@ bool tf_aes_update(struct tf_crypto_aes_operation_state *aes_state,
process_dest += 4;
}
+#ifdef CONFIG_TF_DRIVER_FAULT_INJECTION
+ tf_aes_fault_injection(paes_reg->AES_CTRL, buf);
+#endif
if (!is_kernel)
if (copy_to_user(dest, buf,
nb_blocks * AES_BLOCK_SIZE))
@@ -398,7 +451,8 @@ bool tf_aes_update(struct tf_crypto_aes_operation_state *aes_state,
* | PUBLIC_CRYPTO_DMA_USE_POLLING (poll the end of DMA)
*output: dest : pointer of the output data (can be eq to src)
*/
-static bool tf_aes_update_dma(u8 *src, u8 *dest, u32 nb_blocks, bool is_kernel)
+static bool tf_aes_update_dma(u8 *src, u8 *dest, u32 nb_blocks,
+ u32 ctrl, bool is_kernel)
{
/*
*Note: The DMA only sees physical addresses !
@@ -539,6 +593,11 @@ static bool tf_aes_update_dma(u8 *src, u8 *dest, u32 nb_blocks, bool is_kernel)
*The dma transfer is complete
*/
+ pr_info("%s completing\n", __func__);
+#ifdef CONFIG_TF_DRIVER_FAULT_INJECTION
+ tf_aes_fault_injection(ctrl, dev->dma_buffer);
+#endif
+
/*The DMA output is in the preallocated aligned buffer
*and needs to be copied to the output buffer.*/
if (!is_kernel) {
@@ -577,6 +636,10 @@ static bool tf_aes_update_dma(u8 *src, u8 *dest, u32 nb_blocks, bool is_kernel)
* AES HWA registration into kernel crypto framework
*/
+#define CRYPTO_TFM_REQ_DMA_VISIBLE 0x80000000
+
+static void __iomem *omap_dma_base;
+
static void sg_copy_buf(void *buf, struct scatterlist *sg,
unsigned int start, unsigned int nbytes, int out)
{
@@ -630,13 +693,12 @@ static int aes_dma_start(struct aes_hwa_ctx *ctx)
struct omap_dma_channel_params dma_params;
struct tf_crypto_aes_operation_state *state =
crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(ctx->req));
+ static size_t last_count;
- if (sg_is_last(ctx->in_sg) && sg_is_last(ctx->out_sg)) {
- in = IS_ALIGNED((u32)ctx->in_sg->offset, sizeof(u32));
- out = IS_ALIGNED((u32)ctx->out_sg->offset, sizeof(u32));
+ in = IS_ALIGNED((u32)ctx->in_sg->offset, sizeof(u32));
+ out = IS_ALIGNED((u32)ctx->out_sg->offset, sizeof(u32));
- fast = in && out;
- }
+ fast = in && out;
if (fast) {
count = min(ctx->total, sg_dma_len(ctx->in_sg));
@@ -645,15 +707,20 @@ static int aes_dma_start(struct aes_hwa_ctx *ctx)
if (count != ctx->total)
return -EINVAL;
- err = dma_map_sg(NULL, ctx->in_sg, 1, DMA_TO_DEVICE);
- if (!err)
- return -EINVAL;
-
- err = dma_map_sg(NULL, ctx->out_sg, 1, DMA_FROM_DEVICE);
- if (!err) {
- dma_unmap_sg(NULL, ctx->in_sg, 1, DMA_TO_DEVICE);
- return -EINVAL;
+ /* Only call dma_map_sg if it has not yet been done */
+ if (!(ctx->req->base.flags & CRYPTO_TFM_REQ_DMA_VISIBLE)) {
+ err = dma_map_sg(NULL, ctx->in_sg, 1, DMA_TO_DEVICE);
+ if (!err)
+ return -EINVAL;
+
+ err = dma_map_sg(NULL, ctx->out_sg, 1, DMA_FROM_DEVICE);
+ if (!err) {
+ dma_unmap_sg(
+ NULL, ctx->in_sg, 1, DMA_TO_DEVICE);
+ return -EINVAL;
+ }
}
+ ctx->req->base.flags &= ~CRYPTO_TFM_REQ_DMA_VISIBLE;
addr_in = sg_dma_address(ctx->in_sg);
addr_out = sg_dma_address(ctx->out_sg);
@@ -662,7 +729,6 @@ static int aes_dma_start(struct aes_hwa_ctx *ctx)
} else {
count = sg_copy(&ctx->in_sg, &ctx->in_offset, ctx->buf_in,
ctx->buflen, ctx->total, 0);
-
addr_in = ctx->dma_addr_in;
addr_out = ctx->dma_addr_out;
@@ -671,12 +737,10 @@ static int aes_dma_start(struct aes_hwa_ctx *ctx)
ctx->total -= count;
- tf_crypto_lock_hwa(PUBLIC_CRYPTO_HWA_AES1, LOCK_HWA);
-
/* Configure HWA */
tf_crypto_enable_clock(PUBLIC_CRYPTO_AES1_CLOCK_REG);
- tf_aes_restore_registers(state);
+ tf_aes_restore_registers(state, ctx->flags & FLAGS_ENCRYPT ? 1 : 0);
OUTREG32(&paes_reg->AES_SYSCONFIG, INREG32(&paes_reg->AES_SYSCONFIG)
| AES_SYSCONFIG_DMA_REQ_OUT_EN_BIT
@@ -695,6 +759,8 @@ static int aes_dma_start(struct aes_hwa_ctx *ctx)
dma_params.dst_ei = 0;
dma_params.dst_fi = 0;
dma_params.sync_mode = OMAP_DMA_SYNC_FRAME;
+ dma_params.read_prio = 0;
+ dma_params.write_prio = 0;
/* IN */
dma_params.trigger = ctx->dma_in;
@@ -704,11 +770,25 @@ static int aes_dma_start(struct aes_hwa_ctx *ctx)
dma_params.src_start = addr_in;
dma_params.src_amode = OMAP_DMA_AMODE_POST_INC;
- omap_set_dma_params(ctx->dma_lch_in, &dma_params);
-
- omap_set_dma_dest_burst_mode(ctx->dma_lch_in, OMAP_DMA_DATA_BURST_8);
- omap_set_dma_src_burst_mode(ctx->dma_lch_in, OMAP_DMA_DATA_BURST_8);
- omap_set_dma_src_data_pack(ctx->dma_lch_in, 1);
+ if (reconfigure_dma) {
+ omap_set_dma_params(ctx->dma_lch_in, &dma_params);
+ omap_set_dma_dest_burst_mode(ctx->dma_lch_in,
+ OMAP_DMA_DATA_BURST_8);
+ omap_set_dma_src_burst_mode(ctx->dma_lch_in,
+ OMAP_DMA_DATA_BURST_8);
+ omap_set_dma_src_data_pack(ctx->dma_lch_in, 1);
+ } else {
+ if (last_count != count)
+ omap_set_dma_transfer_params(ctx->dma_lch_in,
+ dma_params.data_type,
+ dma_params.elem_count, dma_params.frame_count,
+ dma_params.sync_mode, dma_params.trigger,
+ dma_params.src_or_dst_synch);
+
+ /* Configure input start address */
+ __raw_writel(dma_params.src_start,
+ omap_dma_base + (0x60 * (ctx->dma_lch_in) + 0x9c));
+ }
/* OUT */
dma_params.trigger = ctx->dma_out;
@@ -718,16 +798,30 @@ static int aes_dma_start(struct aes_hwa_ctx *ctx)
dma_params.dst_start = addr_out;
dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC;
- omap_set_dma_params(ctx->dma_lch_out, &dma_params);
-
- omap_set_dma_dest_burst_mode(ctx->dma_lch_out, OMAP_DMA_DATA_BURST_8);
- omap_set_dma_src_burst_mode(ctx->dma_lch_out, OMAP_DMA_DATA_BURST_8);
- omap_set_dma_dest_data_pack(ctx->dma_lch_out, 1);
+ if (reconfigure_dma) {
+ omap_set_dma_params(ctx->dma_lch_out, &dma_params);
+ omap_set_dma_dest_burst_mode(ctx->dma_lch_out,
+ OMAP_DMA_DATA_BURST_8);
+ omap_set_dma_src_burst_mode(ctx->dma_lch_out,
+ OMAP_DMA_DATA_BURST_8);
+ omap_set_dma_dest_data_pack(ctx->dma_lch_out, 1);
+ reconfigure_dma = false;
+ } else {
+ if (last_count != count) {
+ omap_set_dma_transfer_params(ctx->dma_lch_out,
+ dma_params.data_type,
+ dma_params.elem_count, dma_params.frame_count,
+ dma_params.sync_mode, dma_params.trigger,
+ dma_params.src_or_dst_synch);
+ last_count = count;
+ }
+ /* Configure output start address */
+ __raw_writel(dma_params.dst_start,
+ omap_dma_base + (0x60 * (ctx->dma_lch_out) + 0xa0));
+ }
/* Is this really needed? */
- omap_disable_dma_irq(ctx->dma_lch_in, OMAP_DMA_DROP_IRQ);
omap_enable_dma_irq(ctx->dma_lch_in, OMAP_DMA_BLOCK_IRQ);
- omap_disable_dma_irq(ctx->dma_lch_out, OMAP_DMA_DROP_IRQ);
omap_enable_dma_irq(ctx->dma_lch_out, OMAP_DMA_BLOCK_IRQ);
wmb();
@@ -735,6 +829,33 @@ static int aes_dma_start(struct aes_hwa_ctx *ctx)
omap_start_dma(ctx->dma_lch_in);
omap_start_dma(ctx->dma_lch_out);
+ if (ctx->next_req) {
+ struct ablkcipher_request *req =
+ ablkcipher_request_cast(ctx->next_req);
+
+ if (!(ctx->next_req->flags & CRYPTO_TFM_REQ_DMA_VISIBLE)) {
+ err = dma_map_sg(NULL, req->src, 1, DMA_TO_DEVICE);
+ if (!err)
+ /* Silently fail for now... */
+ return 0;
+
+ err = dma_map_sg(NULL, req->dst, 1, DMA_FROM_DEVICE);
+ if (!err) {
+ dma_unmap_sg(NULL, req->src, 1, DMA_TO_DEVICE);
+ /* Silently fail for now... */
+ return 0;
+ }
+
+ ctx->next_req->flags |= CRYPTO_TFM_REQ_DMA_VISIBLE;
+ ctx->next_req = NULL;
+ }
+ }
+
+ if (ctx->backlog) {
+ ctx->backlog->complete(ctx->backlog, -EINPROGRESS);
+ ctx->backlog = NULL;
+ }
+
return 0;
}
@@ -760,25 +881,30 @@ static int aes_dma_stop(struct aes_hwa_ctx *ctx)
OUTREG32(&paes_reg->AES_SYSCONFIG, 0);
- tf_crypto_disable_clock(PUBLIC_CRYPTO_AES1_CLOCK_REG);
-
- tf_crypto_lock_hwa(PUBLIC_CRYPTO_HWA_AES1, UNLOCK_HWA);
omap_stop_dma(ctx->dma_lch_in);
omap_stop_dma(ctx->dma_lch_out);
- if (ctx->flags & FLAGS_FAST) {
- dma_unmap_sg(NULL, ctx->out_sg, 1, DMA_FROM_DEVICE);
- dma_unmap_sg(NULL, ctx->in_sg, 1, DMA_TO_DEVICE);
- } else {
+ if (!(ctx->flags & FLAGS_FAST)) {
dma_sync_single_for_device(NULL, ctx->dma_addr_out,
ctx->dma_size, DMA_FROM_DEVICE);
+#ifdef CONFIG_TF_DRIVER_FAULT_INJECTION
+ tf_aes_fault_injection(paes_reg->AES_CTRL, ctx->buf_out);
+#endif
+
/* Copy data */
count = sg_copy(&ctx->out_sg, &ctx->out_offset, ctx->buf_out,
ctx->buflen, ctx->dma_size, 1);
if (count != ctx->dma_size)
err = -EINVAL;
+ } else {
+ dma_unmap_sg(NULL, ctx->out_sg, 1, DMA_FROM_DEVICE);
+ dma_unmap_sg(NULL, ctx->in_sg, 1, DMA_TO_DEVICE);
+#ifdef CONFIG_TF_DRIVER_FAULT_INJECTION
+ tf_aes_fault_injection(paes_reg->AES_CTRL,
+ sg_virt(ctx->out_sg));
+#endif
}
if (err || !ctx->total)
@@ -791,6 +917,10 @@ static void aes_dma_callback(int lch, u16 ch_status, void *data)
{
struct aes_hwa_ctx *ctx = data;
+ if (ch_status != OMAP_DMA_BLOCK_IRQ)
+ printk(KERN_ERR "%s: DMA error status: 0x%hx\n",
+ __func__, ch_status);
+
if (lch == ctx->dma_lch_out)
tasklet_schedule(&ctx->task);
}
@@ -798,6 +928,7 @@ static void aes_dma_callback(int lch, u16 ch_status, void *data)
static int aes_dma_init(struct aes_hwa_ctx *ctx)
{
int err = -ENOMEM;
+ struct omap_dma_channel_params dma_params;
ctx->dma_lch_out = -1;
ctx->dma_lch_in = -1;
@@ -839,6 +970,46 @@ static int aes_dma_init(struct aes_hwa_ctx *ctx)
goto err_dma_out;
}
+ dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
+ dma_params.elem_count = DMA_CEN_Elts_per_Frame_AES;
+ dma_params.src_ei = 0;
+ dma_params.src_fi = 0;
+ dma_params.dst_ei = 0;
+ dma_params.dst_fi = 0;
+ dma_params.read_prio = 0;
+ dma_params.write_prio = 0;
+ dma_params.sync_mode = OMAP_DMA_SYNC_FRAME;
+
+ /* IN */
+ dma_params.trigger = ctx->dma_in;
+ dma_params.src_or_dst_synch = OMAP_DMA_DST_SYNC;
+ dma_params.dst_start = AES1_REGS_HW_ADDR + 0x60;
+ dma_params.dst_amode = OMAP_DMA_AMODE_CONSTANT;
+ dma_params.src_amode = OMAP_DMA_AMODE_POST_INC;
+
+ omap_set_dma_params(ctx->dma_lch_in, &dma_params);
+ omap_set_dma_dest_burst_mode(ctx->dma_lch_in, OMAP_DMA_DATA_BURST_8);
+ omap_set_dma_src_burst_mode(ctx->dma_lch_in, OMAP_DMA_DATA_BURST_8);
+ omap_set_dma_src_data_pack(ctx->dma_lch_in, 1);
+
+ /* OUT */
+ dma_params.trigger = ctx->dma_out;
+ dma_params.src_or_dst_synch = OMAP_DMA_SRC_SYNC;
+ dma_params.src_start = AES1_REGS_HW_ADDR + 0x60;
+ dma_params.src_amode = OMAP_DMA_AMODE_CONSTANT;
+ dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC;
+
+ omap_set_dma_params(ctx->dma_lch_out, &dma_params);
+ omap_set_dma_dest_burst_mode(ctx->dma_lch_out, OMAP_DMA_DATA_BURST_8);
+ omap_set_dma_src_burst_mode(ctx->dma_lch_out, OMAP_DMA_DATA_BURST_8);
+ omap_set_dma_dest_data_pack(ctx->dma_lch_out, 1);
+
+ omap_dma_base = ioremap(0x4A056000, 0x1000);
+ if (!omap_dma_base) {
+ printk(KERN_ERR "SMC: Unable to ioremap DMA registers\n");
+ goto err_dma_out;
+ }
+
dprintk(KERN_INFO "aes_dma_init(%p) configured DMA channels"
"(RX = %d, TX = %d)\n", ctx, ctx->dma_lch_in, ctx->dma_lch_out);
@@ -859,20 +1030,22 @@ static void aes_dma_cleanup(struct aes_hwa_ctx *ctx)
omap_free_dma(ctx->dma_lch_in);
dma_free_coherent(NULL, ctx->buflen, ctx->buf_in, ctx->dma_addr_in);
dma_free_coherent(NULL, ctx->buflen, ctx->buf_out, ctx->dma_addr_out);
+ iounmap(omap_dma_base);
}
static int aes_handle_req(struct aes_hwa_ctx *ctx)
{
struct tf_crypto_aes_operation_state *state;
- struct crypto_async_request *async_req, *backlog;
+ struct crypto_async_request *async_req;
struct ablkcipher_request *req;
+ struct aes_reqctx *rctx;
unsigned long flags;
if (ctx->total)
goto start;
spin_lock_irqsave(&ctx->lock, flags);
- backlog = crypto_get_backlog(&ctx->queue);
+ ctx->backlog = crypto_get_backlog(&ctx->queue);
async_req = crypto_dequeue_request(&ctx->queue);
if (!async_req)
clear_bit(FLAGS_BUSY, &ctx->flags);
@@ -881,9 +1054,6 @@ static int aes_handle_req(struct aes_hwa_ctx *ctx)
if (!async_req)
return 0;
- if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
-
req = ablkcipher_request_cast(async_req);
ctx->req = req;
@@ -893,6 +1063,27 @@ static int aes_handle_req(struct aes_hwa_ctx *ctx)
ctx->out_offset = 0;
ctx->out_sg = req->dst;
+ rctx = ablkcipher_request_ctx(req);
+ rctx->mode &= FLAGS_MODE_MASK;
+ ctx->flags = (ctx->flags & ~FLAGS_MODE_MASK) | rctx->mode;
+
+ /*
+ * Try to get the next pending request so it can be prepared while the
+ * first one is being processed.
+ */
+ spin_lock_irqsave(&ctx->lock, flags);
+
+ if (likely(ctx->queue.qlen)) {
+ struct list_head *next_async_req;
+
+ next_async_req = ctx->queue.list.next;
+ ctx->next_req = list_entry(next_async_req,
+ struct crypto_async_request, list);
+ } else {
+ ctx->next_req = NULL;
+ }
+ spin_unlock_irqrestore(&ctx->lock, flags);
+
state = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
if (!AES_CTRL_IS_MODE_ECB(state->CTRL)) {
@@ -959,9 +1150,6 @@ static int aes_operate(struct ablkcipher_request *req)
unsigned long flags;
int err;
- /* Make sure AES HWA is accessible */
- tf_delayed_secure_resume();
-
spin_lock_irqsave(&aes_ctx->lock, flags);
err = ablkcipher_enqueue_request(&aes_ctx->queue, req);
spin_unlock_irqrestore(&aes_ctx->lock, flags);
@@ -974,110 +1162,91 @@ static int aes_operate(struct ablkcipher_request *req)
static int aes_encrypt(struct ablkcipher_request *req)
{
- struct tf_crypto_aes_operation_state *state =
- crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+ struct aes_reqctx *rctx = ablkcipher_request_ctx(req);
- state->CTRL |= AES_CTRL_DIRECTION_ENCRYPT;
+ rctx->mode = FLAGS_ENCRYPT;
return aes_operate(req);
}
static int aes_decrypt(struct ablkcipher_request *req)
{
- struct tf_crypto_aes_operation_state *state =
- crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+ struct aes_reqctx *rctx = ablkcipher_request_ctx(req);
- state->CTRL &= ~(AES_CTRL_DIRECTION_ENCRYPT);
- state->CTRL |= AES_CTRL_DIRECTION_DECRYPT;
+ rctx->mode = FLAGS_DECRYPT;
return aes_operate(req);
}
+
+static void aes_sync_op_complete(
+ struct crypto_async_request *async_req, int err)
+{
+ struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+
+ /* Notify crypto operation has finished */
+ complete((struct completion *) req->base.data);
+}
+
static int aes_sync_operate(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+ unsigned int nbytes, int encrypt)
{
- struct crypto_tfm *tfm = crypto_blkcipher_tfm(desc->tfm);
- struct tf_crypto_aes_operation_state *state = crypto_tfm_ctx(tfm);
- struct blkcipher_walk walk;
+ struct ablkcipher_request *req;
+ struct aes_reqctx *rctx;
int err;
+ DECLARE_COMPLETION(aes_sync_op_completion);
if (nbytes % AES_BLOCK_SIZE)
return -EINVAL;
- /* Make sure AES HWA is accessible */
- tf_delayed_secure_resume();
-
- tf_crypto_lock_hwa(PUBLIC_CRYPTO_HWA_AES1, LOCK_HWA);
- tf_crypto_enable_clock(PUBLIC_CRYPTO_AES1_CLOCK_REG);
+ req = kmalloc(sizeof(struct ablkcipher_request) +
+ sizeof(struct aes_reqctx), GFP_KERNEL);
+ if (req == NULL)
+ return -ENOMEM;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ req->base.tfm = crypto_blkcipher_tfm(desc->tfm);
+ ablkcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
+ ablkcipher_request_set_callback(req, desc->flags,
+ aes_sync_op_complete, &aes_sync_op_completion);
- if (!AES_CTRL_IS_MODE_ECB(state->CTRL)) {
- u32 *ptr = (u32 *) walk.iv;
+ rctx = ablkcipher_request_ctx(req);
+ rctx->mode = encrypt ? FLAGS_ENCRYPT : FLAGS_DECRYPT;
- state->AES_IV_0 = ptr[0];
- state->AES_IV_1 = ptr[1];
- state->AES_IV_2 = ptr[2];
- state->AES_IV_3 = ptr[3];
- }
-
- while ((nbytes = walk.nbytes)) {
- if (!tf_aes_update(state, walk.src.virt.addr,
- walk.dst.virt.addr, nbytes / AES_BLOCK_SIZE)) {
- err = -EINVAL;
- break;
- }
-
- /* tf_aes_update processes all the data */
- nbytes = 0;
+ err = aes_operate(req);
+ switch (err) {
+ case -EINPROGRESS:
+ case -EBUSY:
+ break;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ default:
+ return err;
}
- if (!AES_CTRL_IS_MODE_ECB(state->CTRL)) {
- u32 *ptr = (u32 *) walk.iv;
+ /* Wait for crypto operation to be actually finished */
+ wait_for_completion(&aes_sync_op_completion);
- ptr[0] = state->AES_IV_0;
- ptr[1] = state->AES_IV_1;
- ptr[2] = state->AES_IV_2;
- ptr[3] = state->AES_IV_3;
- }
-
- tf_crypto_disable_clock(PUBLIC_CRYPTO_AES1_CLOCK_REG);
- tf_crypto_lock_hwa(PUBLIC_CRYPTO_HWA_AES1, UNLOCK_HWA);
+ kzfree(req);
- return err;
+ return 0;
}
static int aes_sync_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct crypto_tfm *tfm = crypto_blkcipher_tfm(desc->tfm);
- struct tf_crypto_aes_operation_state *state = crypto_tfm_ctx(tfm);
-
- state->CTRL |= AES_CTRL_DIRECTION_ENCRYPT;
-
dprintk(KERN_INFO "aes_sync_encrypt nbytes=0x%x\n", nbytes);
- return aes_sync_operate(desc, dst, src, nbytes);
+ return aes_sync_operate(desc, dst, src, nbytes, 1);
}
static int aes_sync_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct crypto_tfm *tfm = crypto_blkcipher_tfm(desc->tfm);
- struct tf_crypto_aes_operation_state *state = crypto_tfm_ctx(tfm);
-
- state->CTRL &= ~(AES_CTRL_DIRECTION_ENCRYPT);
- state->CTRL |= AES_CTRL_DIRECTION_DECRYPT;
-
dprintk(KERN_INFO "aes_sync_decrypt\n");
- return aes_sync_operate(desc, dst, src, nbytes);
+ return aes_sync_operate(desc, dst, src, nbytes, 0);
}
/* AES ECB */
@@ -1153,6 +1322,17 @@ static int aes_ctr_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
return aes_setkey(state, key, keylen);
}
+static int aes_cra_init(struct crypto_tfm *tfm)
+{
+ tfm->crt_ablkcipher.reqsize = sizeof(struct aes_reqctx);
+
+ return 0;
+}
+
+static void aes_cra_exit(struct crypto_tfm *tfm)
+{
+}
+
static struct crypto_alg smc_aes_ecb_sync_alg = {
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_priority = 999,
@@ -1236,6 +1416,8 @@ static struct crypto_alg smc_aes_ecb_alg = {
sizeof(struct tf_crypto_aes_operation_state),
.cra_alignmask = 3,
.cra_list = LIST_HEAD_INIT(smc_aes_ecb_alg.cra_list),
+ .cra_init = aes_cra_init,
+ .cra_exit = aes_cra_exit,
.cra_u = {
.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1259,6 +1441,8 @@ static struct crypto_alg smc_aes_cbc_alg = {
sizeof(struct tf_crypto_aes_operation_state),
.cra_alignmask = 3,
.cra_list = LIST_HEAD_INIT(smc_aes_cbc_alg.cra_list),
+ .cra_init = aes_cra_init,
+ .cra_exit = aes_cra_exit,
.cra_u = {
.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1283,6 +1467,8 @@ static struct crypto_alg smc_aes_ctr_alg = {
sizeof(struct tf_crypto_aes_operation_state),
.cra_alignmask = 3,
.cra_list = LIST_HEAD_INIT(smc_aes_ctr_alg.cra_list),
+ .cra_init = aes_cra_init,
+ .cra_exit = aes_cra_exit,
.cra_u = {
.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1300,11 +1486,15 @@ int register_smc_public_crypto_aes(void)
{
int ret;
+ tf_crypto_enable_clock(PUBLIC_CRYPTO_AES1_CLOCK_REG);
+ OUTREG32(&paes_reg->AES_SYSCONFIG, 0);
+ tf_crypto_disable_clock(PUBLIC_CRYPTO_AES1_CLOCK_REG);
+
aes_ctx = kzalloc(sizeof(struct aes_hwa_ctx), GFP_KERNEL);
if (aes_ctx == NULL)
return -ENOMEM;
- crypto_init_queue(&aes_ctx->queue, 1);
+ crypto_init_queue(&aes_ctx->queue, 50);
tasklet_init(&aes_ctx->task, aes_tasklet, (unsigned long)aes_ctx);
spin_lock_init(&aes_ctx->lock);
@@ -1377,4 +1567,10 @@ void unregister_smc_public_crypto_aes(void)
tasklet_kill(&aes_ctx->task);
kfree(aes_ctx);
}
-#endif
+
+#endif /* CONFIG_SMC_KERNEL_CRYPTO */
+
+void tf_aes_pm_resume(void)
+{
+ reconfigure_dma = true;
+}
diff --git a/security/smc/tf_crypto_digest.c b/security/smc/tf_crypto_digest.c
index d69a97f..8023839 100644
--- a/security/smc/tf_crypto_digest.c
+++ b/security/smc/tf_crypto_digest.c
@@ -151,12 +151,25 @@ static const u8 sha256OverEmptyString[] = {
static void tf_digest_hw_perform_64b(u32 *data,
u32 algo, u32 bytes_processed);
static bool tf_digest_hw_perform_dma(u8 *data, u32 nDataLength,
- u32 algo, u32 bytes_processed);
+ u32 algo, u32 bytes_processed,
+ unsigned int buffer_origin);
static bool tf_digest_update_dma(
struct tf_crypto_sha_operation_state *sha_state,
- u8 *data, u32 data_length);
+ u8 *data, u32 data_length, unsigned int buffer_origin);
+/*------------------------------------------------------------------------- */
+
+static unsigned long tf_cpy_from(
+void *to, const void *from, unsigned long n, unsigned int buffer_origin)
+{
+ if (buffer_origin == TF_BUFFER_KERNEL) {
+ memcpy(to, from, n);
+ return 0;
+ } else {
+ return copy_from_user(to, from, n);
+ }
+}
/*-------------------------------------------------------------------------
*Save HWA registers into the specified operation state structure
@@ -164,8 +177,7 @@ static bool tf_digest_update_dma(
static void tf_digest_save_registers(
struct tf_crypto_sha_operation_state *sha_state)
{
- dprintk(KERN_INFO "tf_digest_save_registers: State=%p\n",
- sha_state);
+ dpr_info("%s: State=%p\n", __func__, sha_state);
sha_state->SHA_DIGEST_A = INREG32(&sha1_md5_reg->IDIGEST_A);
sha_state->SHA_DIGEST_B = INREG32(&sha1_md5_reg->IDIGEST_B);
@@ -183,8 +195,7 @@ static void tf_digest_save_registers(
static void tf_digest_restore_registers(
struct tf_crypto_sha_operation_state *sha_state)
{
- dprintk(KERN_INFO "tf_digest_restore_registers: State=%p\n",
- sha_state);
+ dpr_info("%s: State=%p\n", __func__, sha_state);
if (sha_state->bytes_processed != 0) {
/*
@@ -219,7 +230,7 @@ void tf_digest_exit(void)
}
bool tf_digest_update(struct tf_crypto_sha_operation_state *sha_state,
- u8 *data, u32 data_length)
+ u8 *data, u32 data_length, unsigned int buffer_origin)
{
u32 dma_use = PUBLIC_CRYPTO_DMA_USE_NONE;
@@ -229,15 +240,13 @@ bool tf_digest_update(struct tf_crypto_sha_operation_state *sha_state,
if (data_length >= DMA_TRIGGER_IRQ_DIGEST)
dma_use = PUBLIC_CRYPTO_DMA_USE_IRQ;
- dprintk(KERN_INFO "tf_digest_update : "\
- "Data=0x%08x/%u, Chunk=%u, Processed=%u, dma_use=0x%08x\n",
- (u32)data, (u32)data_length,
+ dpr_info("%s: Data=0x%08x/%u, Chunk=%u, Processed=%u, dma_use=0x%08x\n",
+ __func__, (u32)data, (u32)data_length,
sha_state->chunk_length, sha_state->bytes_processed,
dma_use);
if (data_length == 0) {
- dprintk(KERN_INFO "tf_digest_update: "\
- "Nothing to process\n");
+ dpr_info("%s: Nothing to process\n", __func__);
return true;
}
@@ -249,7 +258,8 @@ bool tf_digest_update(struct tf_crypto_sha_operation_state *sha_state,
tf_digest_restore_registers(sha_state);
/*perform the updates with DMA */
- if (!tf_digest_update_dma(sha_state, data, data_length))
+ if (!tf_digest_update_dma(
+ sha_state, data, data_length, buffer_origin))
return false;
/* Save the accelerator registers into the operation state */
@@ -274,10 +284,11 @@ bool tf_digest_update(struct tf_crypto_sha_operation_state *sha_state,
/*So we fill the chunk buffer with the new data to
*complete to 64B */
- if (copy_from_user(
+ if (tf_cpy_from(
sha_state->chunk_buffer+sha_state->chunk_length,
data,
- vLengthToComplete))
+ vLengthToComplete,
+ buffer_origin))
return false;
if (sha_state->chunk_length + data_length ==
@@ -285,9 +296,8 @@ bool tf_digest_update(struct tf_crypto_sha_operation_state *sha_state,
/*We'll keep some data for the final */
sha_state->chunk_length =
HASH_BLOCK_BYTES_LENGTH;
- dprintk(KERN_INFO "tf_digest_update: "\
- "Done: Chunk=%u; Processed=%u\n",
- sha_state->chunk_length,
+ dpr_info("%s: Done: Chunk=%u; Processed=%u\n",
+ __func__, sha_state->chunk_length,
sha_state->bytes_processed);
return true;
}
@@ -332,10 +342,11 @@ bool tf_digest_update(struct tf_crypto_sha_operation_state *sha_state,
*/
/*We copy the data to process to an aligned
*buffer */
- if (copy_from_user(
+ if (tf_cpy_from(
pTempAlignedBuffer,
data,
- HASH_BLOCK_BYTES_LENGTH))
+ HASH_BLOCK_BYTES_LENGTH,
+ buffer_origin))
return false;
/*Then we send this buffer to the hash
@@ -368,18 +379,18 @@ bool tf_digest_update(struct tf_crypto_sha_operation_state *sha_state,
/*So we fill the chunk buffer with the new data to
*complete to 64B */
- if (copy_from_user(
+ if (tf_cpy_from(
sha_state->chunk_buffer+sha_state->chunk_length,
data,
- data_length))
+ data_length,
+ buffer_origin))
return false;
sha_state->chunk_length += data_length;
}
}
- dprintk(KERN_INFO "tf_digest_update: Done: "\
- "Chunk=%u; Processed=%u\n",
- sha_state->chunk_length, sha_state->bytes_processed);
+ dpr_info("%s: Done: Chunk=%u; Processed=%u\n",
+ __func__, sha_state->chunk_length, sha_state->bytes_processed);
return true;
}
@@ -443,7 +454,8 @@ static void tf_digest_hw_perform_64b(u32 *data,
/*------------------------------------------------------------------------- */
static bool tf_digest_hw_perform_dma(u8 *data, u32 nDataLength,
- u32 algo, u32 bytes_processed)
+ u32 algo, u32 bytes_processed,
+ unsigned int buffer_origin)
{
/*
*Note: The DMA only sees physical addresses !
@@ -455,9 +467,9 @@ static bool tf_digest_hw_perform_dma(u8 *data, u32 nDataLength,
u32 algo_constant;
struct tf_device *dev = tf_get_device();
- dprintk(KERN_INFO
- "tf_digest_hw_perform_dma: Buffer=0x%08x/%u\n",
- (u32)data, (u32)nDataLength);
+ dpr_info(
+ "%s: Buffer=0x%08x/%u\n",
+ __func__, (u32)data, (u32)nDataLength);
/*lock the DMA */
mutex_lock(&dev->sm.dma_mutex);
@@ -486,7 +498,8 @@ static bool tf_digest_hw_perform_dma(u8 *data, u32 nDataLength,
* buffer which has correct properties from efficient DMA
* transfers.
*/
- if (copy_from_user(dev->dma_buffer, data, length_loop)) {
+ if (tf_cpy_from(
+ dev->dma_buffer, data, length_loop, buffer_origin)) {
omap_free_dma(dma_ch0);
mutex_unlock(&dev->sm.dma_mutex);
return false;
@@ -568,9 +581,9 @@ static bool tf_digest_hw_perform_dma(u8 *data, u32 nDataLength,
*/
static bool tf_digest_update_dma(
struct tf_crypto_sha_operation_state *sha_state,
- u8 *data, u32 data_length)
+ u8 *data, u32 data_length, unsigned int buffer_origin)
{
- dprintk(KERN_INFO "tf_digest_update_dma\n");
+ dpr_info("%s\n", __func__);
if (sha_state->chunk_length != 0) {
@@ -581,9 +594,9 @@ static bool tf_digest_update_dma(
chunk_length + data_length <= HASH_BLOCK_BYTES_LENGTH) {
/*So we fill the chunk buffer with the new data */
- if (copy_from_user(sha_state->chunk_buffer +
+ if (tf_cpy_from(sha_state->chunk_buffer +
sha_state->chunk_length, data,
- data_length))
+ data_length, buffer_origin))
return false;
sha_state->chunk_length += data_length;
@@ -597,9 +610,9 @@ static bool tf_digest_update_dma(
if (vLengthToComplete != 0) {
/*So we fill the chunk buffer with the new data to
*complete to 64B */
- if (copy_from_user(sha_state->chunk_buffer +
+ if (tf_cpy_from(sha_state->chunk_buffer +
sha_state->chunk_length, data,
- vLengthToComplete))
+ vLengthToComplete, buffer_origin))
return false;
}
@@ -631,7 +644,8 @@ static bool tf_digest_update_dma(
}
if (!tf_digest_hw_perform_dma(data, vDmaProcessize,
- sha_state->CTRL, sha_state->bytes_processed))
+ sha_state->CTRL, sha_state->bytes_processed,
+ buffer_origin))
return false;
sha_state->bytes_processed =
@@ -646,7 +660,8 @@ static bool tf_digest_update_dma(
return false;
/*We now fill the chunk buffer with the remaining data */
- if (copy_from_user(sha_state->chunk_buffer, data, data_length))
+ if (tf_cpy_from(
+ sha_state->chunk_buffer, data, data_length, buffer_origin))
return false;
sha_state->chunk_length = data_length;
@@ -693,6 +708,24 @@ static int static_Hash_HwReadDigest(u32 algo, u8 *out)
out[idx++] = (u8) ((tmp >> 24) & 0xff);
}
+#ifdef CONFIG_TF_DRIVER_FAULT_INJECTION
+#define FAULTY(mask, ctrl_algo, alg_name) \
+ (((mask) & TF_CRYPTO_ALG_##alg_name) && \
+ (ctrl_algo) == DIGEST_CTRL_ALGO_##alg_name)
+ if (FAULTY(tf_fault_injection_mask, algo, MD5) ||
+ FAULTY(tf_fault_injection_mask, algo, SHA1) ||
+ FAULTY(tf_fault_injection_mask, algo, SHA224) ||
+ FAULTY(tf_fault_injection_mask, algo, SHA256)) {
+ pr_notice("TF: injecting fault in digest!\n");
+ out[0] = 0xff;
+ out[1] ^= 0xff;
+ } else {
+ dpr_info("%s: no fault "
+ "(mask=0x%x algo=%u)\n",
+ __func__, tf_fault_injection_mask, algo);
+ }
+#undef FAULTY
+#endif /* CONFIG_TF_DRIVER_FAULT_INJECTION */
return 0;
}
@@ -773,24 +806,21 @@ static int tf_digest_final(struct tf_crypto_sha_operation_state *state,
* Digest HWA registration into kernel crypto framework
*/
+static DEFINE_SPINLOCK(digest_lock);
+
static int digest_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct tf_crypto_sha_operation_state *state = shash_desc_ctx(desc);
- /* Make sure SHA/MD5 HWA is accessible */
- tf_delayed_secure_resume();
-
- tf_crypto_lock_hwa(PUBLIC_CRYPTO_HWA_SHA, LOCK_HWA);
-
tf_crypto_enable_clock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG);
- tf_digest_update(state, (u8 *) data, len);
+ spin_lock(&digest_lock);
+ tf_digest_update(state, (u8 *) data, len, TF_BUFFER_KERNEL);
+ spin_unlock(&digest_lock);
tf_crypto_disable_clock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG);
- tf_crypto_lock_hwa(PUBLIC_CRYPTO_HWA_SHA, UNLOCK_HWA);
-
return 0;
}
@@ -799,19 +829,14 @@ static int digest_final(struct shash_desc *desc, u8 *out)
int ret;
struct tf_crypto_sha_operation_state *state = shash_desc_ctx(desc);
- /* Make sure SHA/MD5 HWA is accessible */
- tf_delayed_secure_resume();
-
- tf_crypto_lock_hwa(PUBLIC_CRYPTO_HWA_SHA, LOCK_HWA);
-
tf_crypto_enable_clock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG);
+ spin_lock(&digest_lock);
ret = tf_digest_final(state, out);
+ spin_unlock(&digest_lock);
tf_crypto_disable_clock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG);
- tf_crypto_lock_hwa(PUBLIC_CRYPTO_HWA_SHA, UNLOCK_HWA);
-
return ret;
}
@@ -951,7 +976,7 @@ int register_smc_public_crypto_digest(void)
{
int ret;
- dprintk(KERN_INFO "SMC: Registering digest algorithms\n");
+ dpr_info("SMC: Registering digest algorithms\n");
ret = crypto_register_shash(&smc_md5_alg);
if (ret)
@@ -982,7 +1007,7 @@ sha1_err:
void unregister_smc_public_crypto_digest(void)
{
- dprintk(KERN_INFO "SMC: Unregistering digest algorithms\n");
+ dpr_info("SMC: Unregistering digest algorithms\n");
crypto_unregister_shash(&smc_md5_alg);
crypto_unregister_shash(&smc_sha1_alg);
diff --git a/security/smc/tf_crypto_hmac.c b/security/smc/tf_crypto_hmac.c
new file mode 100644
index 0000000..0d3a23b
--- /dev/null
+++ b/security/smc/tf_crypto_hmac.c
@@ -0,0 +1,296 @@
+/*
+ * Cryptographic API.
+ *
+ * HMAC: Keyed-Hashing for Message Authentication (RFC2104).
+ *
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * The HMAC implementation is derived from USAGI.
+ * Copyright (c) 2002 Kazunori Miyazawa <miyazawa@linux-ipv6.org> / USAGI
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/string.h>
+#include "tf_util.h"
+#include "tf_crypto.h"
+
+struct hmac_ctx {
+ struct crypto_shash *hash;
+};
+
+static inline void *align_ptr(void *p, unsigned int align)
+{
+ return (void *)ALIGN((unsigned long)p, align);
+}
+
+static inline struct hmac_ctx *hmac_ctx(struct crypto_shash *tfm)
+{
+ return align_ptr(crypto_shash_ctx_aligned(tfm) +
+ crypto_shash_statesize(tfm) * 2,
+ crypto_tfm_ctx_alignment());
+}
+
+static int hmac_setkey(struct crypto_shash *parent,
+ const u8 *inkey, unsigned int keylen)
+{
+ int bs = crypto_shash_blocksize(parent);
+ int ds = crypto_shash_digestsize(parent);
+ int ss = crypto_shash_statesize(parent);
+ char *ipad = crypto_shash_ctx_aligned(parent);
+ char *opad = ipad + ss;
+ struct hmac_ctx *ctx = align_ptr(opad + ss,
+ crypto_tfm_ctx_alignment());
+ struct crypto_shash *hash = ctx->hash;
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(hash)];
+ } desc;
+ unsigned int i;
+
+ desc.shash.tfm = hash;
+ desc.shash.flags = crypto_shash_get_flags(parent) &
+ CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ if (keylen > bs) {
+ int err;
+
+ err = crypto_shash_digest(&desc.shash, inkey, keylen, ipad);
+ if (err)
+ return err;
+
+ keylen = ds;
+ } else
+ memcpy(ipad, inkey, keylen);
+
+ memset(ipad + keylen, 0, bs - keylen);
+ memcpy(opad, ipad, bs);
+
+ for (i = 0; i < bs; i++) {
+ ipad[i] ^= 0x36;
+ opad[i] ^= 0x5c;
+ }
+
+ return crypto_shash_init(&desc.shash) ?:
+ crypto_shash_update(&desc.shash, ipad, bs) ?:
+ crypto_shash_export(&desc.shash, ipad) ?:
+ crypto_shash_init(&desc.shash) ?:
+ crypto_shash_update(&desc.shash, opad, bs) ?:
+ crypto_shash_export(&desc.shash, opad);
+}
+
+static int hmac_export(struct shash_desc *pdesc, void *out)
+{
+ struct shash_desc *desc = shash_desc_ctx(pdesc);
+
+ desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_shash_export(desc, out);
+}
+
+static int hmac_import(struct shash_desc *pdesc, const void *in)
+{
+ struct shash_desc *desc = shash_desc_ctx(pdesc);
+ struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm);
+
+ desc->tfm = ctx->hash;
+ desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_shash_import(desc, in);
+}
+
+static int hmac_init(struct shash_desc *pdesc)
+{
+ return hmac_import(pdesc, crypto_shash_ctx_aligned(pdesc->tfm));
+}
+
+static int hmac_update(struct shash_desc *pdesc,
+ const u8 *data, unsigned int nbytes)
+{
+ struct shash_desc *desc = shash_desc_ctx(pdesc);
+
+ desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_shash_update(desc, data, nbytes);
+}
+
+#ifdef CONFIG_TF_DRIVER_FAULT_INJECTION
+#define FAULTY(mask, actual_name, constant, target_name) \
+ (((mask) & TF_CRYPTO_ALG_HMAC_##constant) && \
+ !strcmp((target_name), (actual_name)))
+static int hmac_inject_fault(struct crypto_shash *self, u8 *out)
+{
+ struct crypto_shash *parent = hmac_ctx(self)->hash;
+ const char *alg_name = crypto_tfm_alg_name(crypto_shash_tfm(parent));
+ if (FAULTY(tf_fault_injection_mask, alg_name, MD5, "md5") ||
+ FAULTY(tf_fault_injection_mask, alg_name, SHA1, "sha1") ||
+ FAULTY(tf_fault_injection_mask, alg_name, SHA224, "sha224") ||
+ FAULTY(tf_fault_injection_mask, alg_name, SHA256, "sha256")) {
+ pr_warning("TF: injecting fault in HMAC(%s)\n", alg_name);
+ out[2] = 0xff;
+ out[3] ^= 0xff;
+ } else {
+ dprintk(KERN_INFO "HMAC(%s): no fault\n", alg_name);
+ }
+ return 0;
+}
+#undef FAULTY
+#else
+#define hmac_inject_fault(parent, out) 0
+#endif
+
+static int hmac_final(struct shash_desc *pdesc, u8 *out)
+{
+ struct crypto_shash *parent = pdesc->tfm;
+ int ds = crypto_shash_digestsize(parent);
+ int ss = crypto_shash_statesize(parent);
+ char *opad = crypto_shash_ctx_aligned(parent) + ss;
+ struct shash_desc *desc = shash_desc_ctx(pdesc);
+
+ desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_shash_final(desc, out) ?:
+ crypto_shash_import(desc, opad) ?:
+ crypto_shash_finup(desc, out, ds, out) ?:
+ hmac_inject_fault(parent, out);
+}
+
+static int hmac_finup(struct shash_desc *pdesc, const u8 *data,
+ unsigned int nbytes, u8 *out)
+{
+
+ struct crypto_shash *parent = pdesc->tfm;
+ int ds = crypto_shash_digestsize(parent);
+ int ss = crypto_shash_statesize(parent);
+ char *opad = crypto_shash_ctx_aligned(parent) + ss;
+ struct shash_desc *desc = shash_desc_ctx(pdesc);
+
+ desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_shash_finup(desc, data, nbytes, out) ?:
+ crypto_shash_import(desc, opad) ?:
+ crypto_shash_finup(desc, out, ds, out) ?:
+ hmac_inject_fault(parent, out);
+}
+
+static int hmac_init_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_shash *parent = __crypto_shash_cast(tfm);
+ struct crypto_shash *hash;
+ struct crypto_instance *inst = (void *)tfm->__crt_alg;
+ struct crypto_shash_spawn *spawn = crypto_instance_ctx(inst);
+ struct hmac_ctx *ctx = hmac_ctx(parent);
+
+ hash = crypto_spawn_shash(spawn);
+ if (IS_ERR(hash))
+ return PTR_ERR(hash);
+
+ parent->descsize = sizeof(struct shash_desc) +
+ crypto_shash_descsize(hash);
+
+ ctx->hash = hash;
+ return 0;
+}
+
+static void hmac_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct hmac_ctx *ctx = hmac_ctx(__crypto_shash_cast(tfm));
+ crypto_free_shash(ctx->hash);
+}
+
+static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+ struct shash_instance *inst;
+ struct crypto_alg *alg;
+ struct shash_alg *salg;
+ int err;
+ int ds;
+ int ss;
+
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
+ if (err)
+ return err;
+
+ salg = shash_attr_alg(tb[1], 0, 0);
+ if (IS_ERR(salg))
+ return PTR_ERR(salg);
+
+ err = -EINVAL;
+ ds = salg->digestsize;
+ ss = salg->statesize;
+ alg = &salg->base;
+ if (ds > alg->cra_blocksize ||
+ ss < alg->cra_blocksize)
+ goto out_put_alg;
+
+ inst = shash_alloc_instance("tf_hmac", alg);
+ err = PTR_ERR(inst);
+ if (IS_ERR(inst))
+ goto out_put_alg;
+
+ err = crypto_init_shash_spawn(shash_instance_ctx(inst), salg,
+ shash_crypto_instance(inst));
+ if (err)
+ goto out_free_inst;
+
+ inst->alg.base.cra_priority = alg->cra_priority;
+ inst->alg.base.cra_blocksize = alg->cra_blocksize;
+ inst->alg.base.cra_alignmask = alg->cra_alignmask;
+
+ ss = ALIGN(ss, alg->cra_alignmask + 1);
+ inst->alg.digestsize = ds;
+ inst->alg.statesize = ss;
+
+ inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) +
+ ALIGN(ss * 2, crypto_tfm_ctx_alignment());
+
+ inst->alg.base.cra_init = hmac_init_tfm;
+ inst->alg.base.cra_exit = hmac_exit_tfm;
+
+ inst->alg.init = hmac_init;
+ inst->alg.update = hmac_update;
+ inst->alg.final = hmac_final;
+ inst->alg.finup = hmac_finup;
+ inst->alg.export = hmac_export;
+ inst->alg.import = hmac_import;
+ inst->alg.setkey = hmac_setkey;
+
+ err = shash_register_instance(tmpl, inst);
+ if (err) {
+out_free_inst:
+ shash_free_instance(shash_crypto_instance(inst));
+ }
+
+out_put_alg:
+ crypto_mod_put(alg);
+ return err;
+}
+
+static struct crypto_template hmac_tmpl = {
+ .name = "tf_hmac",
+ .create = hmac_create,
+ .free = shash_free_instance,
+ .module = THIS_MODULE,
+};
+
+int __init tf_crypto_hmac_module_init(void)
+{
+ return crypto_register_template(&hmac_tmpl);
+}
+
+void __exit tf_crypto_hmac_module_exit(void)
+{
+ crypto_unregister_template(&hmac_tmpl);
+}
diff --git a/security/smc/tf_defs.h b/security/smc/tf_defs.h
index 23dc7ca..ba9940d 100644
--- a/security/smc/tf_defs.h
+++ b/security/smc/tf_defs.h
@@ -169,7 +169,7 @@ struct tf_shmem_desc {
u32 block_identifier;
/* Client buffer */
- u8 *pBuffer;
+ u8 *client_buffer;
/* Up to eight coarse page table context */
struct tf_coarse_page_table *coarse_pg_table[TF_MAX_COARSE_PAGES];
@@ -212,7 +212,7 @@ struct tf_comm {
/*
* The virtual address of the L1 shared buffer.
*/
- struct tf_l1_shared_buffer *pBuffer;
+ struct tf_l1_shared_buffer *l1_buffer;
/*
* The wait queue the client threads are waiting on.
@@ -246,8 +246,6 @@ struct tf_comm {
*/
int se_initialized;
- /* Virtual address of the L0 communication buffer */
- void *init_shared_buffer;
/*
* Lock to be held by a client when executing an RPC
@@ -271,13 +269,6 @@ struct tf_comm {
/*----------------------------------------------------------------------------*/
struct tf_device_stats {
- struct kobject kobj;
-
- struct kobj_type kobj_type;
-
- struct attribute kobj_stat_attribute;
-
- struct attribute *kobj_attribute_list[2];
atomic_t stat_pages_allocated;
atomic_t stat_memories_allocated;
@@ -292,6 +283,11 @@ struct tf_device_stats {
*/
struct tf_device {
/*
+ * The kernel object for the device
+ */
+ struct kobject kobj;
+
+ /*
* The device number for the device.
*/
dev_t dev_number;
@@ -544,10 +540,6 @@ struct tf_device *tf_get_device(void);
* Kernel Differences
*/
-#ifdef CONFIG_ANDROID
#define GROUP_INFO get_current_groups()
-#else
-#define GROUP_INFO (current->group_info)
-#endif
#endif /* !defined(__TF_DEFS_H__) */
diff --git a/security/smc/tf_device.c b/security/smc/tf_device.c
index 7c2c623..fcd09e3 100644
--- a/security/smc/tf_device.c
+++ b/security/smc/tf_device.c
@@ -27,9 +27,7 @@
#include <linux/syscore_ops.h>
#include <linux/vmalloc.h>
#include <linux/signal.h>
-#ifdef CONFIG_ANDROID
#include <linux/device.h>
-#endif
#include "tf_protocol.h"
#include "tf_defs.h"
@@ -40,6 +38,9 @@
#include <plat/cpu.h>
#include "tf_zebra.h"
#endif
+#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
+#include "tf_crypto.h"
+#endif
#include "s_version.h"
@@ -129,10 +130,29 @@ MODULE_PARM_DESC(soft_interrupt,
"The softint interrupt line used by the Secure world");
#endif
-#ifdef CONFIG_ANDROID
-static struct class *tf_class;
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+unsigned tf_debug_level = UINT_MAX;
+module_param_named(debug, tf_debug_level, uint, 0644);
+#endif
+
+#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
+char *tf_integrity_hmac_sha256_expected_value;
+module_param_named(hmac_sha256, tf_integrity_hmac_sha256_expected_value,
+ charp, 0444);
+
+#ifdef CONFIG_TF_DRIVER_FAULT_INJECTION
+unsigned tf_fault_injection_mask;
+module_param_named(fault, tf_fault_injection_mask, uint, 0644);
+#endif
+
+int tf_self_test_blkcipher_align;
+module_param_named(post_align, tf_self_test_blkcipher_align, int, 0644);
+int tf_self_test_blkcipher_use_vmalloc;
+module_param_named(post_vmalloc, tf_self_test_blkcipher_use_vmalloc, int, 0644);
#endif
+static struct class *tf_class;
+
/*----------------------------------------------------------------------------
* Global Variables
*----------------------------------------------------------------------------*/
@@ -163,42 +183,114 @@ struct tf_device *tf_get_device(void)
}
/*
- * displays the driver stats
+ * sysfs entries
*/
-static ssize_t kobject_show(struct kobject *kobj,
- struct attribute *attribute, char *buf)
-{
- struct tf_device_stats *dev_stats = &g_tf_dev.stats;
- u32 pages_allocated;
- u32 pages_locked;
- u32 memories_allocated;
+struct tf_sysfs_entry {
+ struct attribute attr;
+ ssize_t (*show)(struct tf_device *, char *);
+ ssize_t (*store)(struct tf_device *, const char *, size_t);
+};
- memories_allocated =
- atomic_read(&(dev_stats->stat_memories_allocated));
- pages_allocated =
- atomic_read(&(dev_stats->stat_pages_allocated));
- pages_locked = atomic_read(&(dev_stats->stat_pages_locked));
+/*
+ * sysfs entry showing allocation stats
+ */
+static ssize_t info_show(struct tf_device *dev, char *buf)
+{
+ struct tf_device_stats *dev_stats = &dev->stats;
- /*
- * AFY: could we add the number of context switches (call to the SMI
- * instruction)
- */
return snprintf(buf, PAGE_SIZE,
"stat.memories.allocated: %d\n"
"stat.pages.allocated: %d\n"
"stat.pages.locked: %d\n",
- memories_allocated,
- pages_allocated,
- pages_locked);
+ atomic_read(&dev_stats->stat_memories_allocated),
+ atomic_read(&dev_stats->stat_pages_allocated),
+ atomic_read(&dev_stats->stat_pages_locked));
+}
+static struct tf_sysfs_entry tf_info_entry = __ATTR_RO(info);
+
+#ifdef CONFIG_TF_ZEBRA
+/*
+ * sysfs entry showing whether secure world is up and running
+ */
+static ssize_t tf_started_show(struct tf_device *dev, char *buf)
+{
+ int tf_started = test_bit(TF_COMM_FLAG_PA_AVAILABLE,
+ &dev->sm.flags);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", tf_started ? "yes" : "no");
+}
+static struct tf_sysfs_entry tf_started_entry =
+ __ATTR_RO(tf_started);
+
+static ssize_t workspace_addr_show(struct tf_device *dev, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", dev->workspace_addr);
+}
+static struct tf_sysfs_entry tf_workspace_addr_entry =
+ __ATTR_RO(workspace_addr);
+
+static ssize_t workspace_size_show(struct tf_device *dev, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", dev->workspace_size);
+}
+static struct tf_sysfs_entry tf_workspace_size_entry =
+ __ATTR_RO(workspace_size);
+#endif
+
+static ssize_t tf_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *page)
+{
+ struct tf_sysfs_entry *entry = container_of(attr, struct tf_sysfs_entry,
+ attr);
+ struct tf_device *dev = container_of(kobj, struct tf_device, kobj);
+
+ if (!entry->show)
+ return -EIO;
+
+ return entry->show(dev, page);
+}
+
+static ssize_t tf_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *page, size_t length)
+{
+ struct tf_sysfs_entry *entry = container_of(attr, struct tf_sysfs_entry,
+ attr);
+ struct tf_device *dev = container_of(kobj, struct tf_device, kobj);
+
+ if (!entry->store)
+ return -EIO;
+
+ return entry->store(dev, page, length);
}
-static const struct sysfs_ops kobj_sysfs_operations = {
- .show = kobject_show,
+static void tf_kobj_release(struct kobject *kobj) {}
+
+static struct attribute *tf_default_attrs[] = {
+ &tf_info_entry.attr,
+#ifdef CONFIG_TF_ZEBRA
+ &tf_started_entry.attr,
+ &tf_workspace_addr_entry.attr,
+ &tf_workspace_size_entry.attr,
+#endif
+ NULL,
+};
+static const struct sysfs_ops tf_sysfs_ops = {
+ .show = tf_attr_show,
+ .store = tf_attr_store,
+};
+static struct kobj_type tf_ktype = {
+ .release = tf_kobj_release,
+ .sysfs_ops = &tf_sysfs_ops,
+ .default_attrs = tf_default_attrs
};
/*----------------------------------------------------------------------------*/
+#if defined(MODULE) && defined(CONFIG_TF_ZEBRA)
+static char *smc_mem;
+module_param(smc_mem, charp, S_IRUGO);
+#endif
static const struct syscore_ops g_tf_syscore_ops = {
.shutdown = tf_device_shutdown,
.suspend = tf_device_suspend,
@@ -212,7 +304,6 @@ static int __init tf_device_register(void)
{
int error;
struct tf_device *dev = &g_tf_dev;
- struct tf_device_stats *dev_stats = &dev->stats;
dprintk(KERN_INFO "tf_device_register()\n");
@@ -227,21 +318,33 @@ static int __init tf_device_register(void)
INIT_LIST_HEAD(&dev->connection_list);
spin_lock_init(&dev->connection_list_lock);
- /* register the sysfs object driver stats */
- dev_stats->kobj_type.sysfs_ops = &kobj_sysfs_operations;
+#if defined(MODULE) && defined(CONFIG_TF_ZEBRA)
+ error = (*tf_comm_early_init)();
+ if (error)
+ goto module_early_init_failed;
+
+ error = tf_device_mshield_init(smc_mem);
+ if (error)
+ goto mshield_init_failed;
+
+#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
+ error = tf_crypto_hmac_module_init();
+ if (error)
+ goto hmac_init_failed;
- dev_stats->kobj_stat_attribute.name = "info";
- dev_stats->kobj_stat_attribute.mode = S_IRUGO;
- dev_stats->kobj_attribute_list[0] =
- &dev_stats->kobj_stat_attribute;
+ error = tf_self_test_register_device();
+ if (error)
+ goto self_test_register_device_failed;
+#endif
+#endif
- dev_stats->kobj_type.default_attrs =
- dev_stats->kobj_attribute_list,
- error = kobject_init_and_add(&(dev_stats->kobj),
- &(dev_stats->kobj_type), NULL, "%s",
+ /* register the sysfs object driver stats */
+ error = kobject_init_and_add(&dev->kobj, &tf_ktype, NULL, "%s",
TF_DEVICE_BASE_NAME);
if (error) {
- kobject_put(&dev_stats->kobj);
+ printk(KERN_ERR "tf_device_register(): "
+ "kobject_init_and_add failed (error %d)!\n", error);
+ kobject_put(&dev->kobj);
goto kobject_init_and_add_failed;
}
@@ -285,12 +388,22 @@ static int __init tf_device_register(void)
goto init_failed;
}
-#ifdef CONFIG_ANDROID
+#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
+	error = tf_self_test_post_init(&(dev->kobj));
+ /* N.B. error > 0 indicates a POST failure, which will not
+ prevent the module from loading. */
+ if (error < 0) {
+ dprintk(KERN_ERR "tf_device_register(): "
+ "tf_self_test_post_vectors failed (error %d)!\n",
+ error);
+ goto post_failed;
+ }
+#endif
+
tf_class = class_create(THIS_MODULE, TF_DEVICE_BASE_NAME);
device_create(tf_class, NULL,
dev->dev_number,
NULL, TF_DEVICE_BASE_NAME);
-#endif
#ifdef CONFIG_TF_ZEBRA
/*
@@ -298,11 +411,10 @@ static int __init tf_device_register(void)
*/
error = tf_ctrl_device_register();
if (error)
- goto init_failed;
+ goto ctrl_failed;
#endif
-#ifdef CONFIG_BENCH_SECURE_CYCLE
- run_bogo_mips();
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
address_cache_property((unsigned long) &tf_device_register);
#endif
/*
@@ -315,6 +427,13 @@ static int __init tf_device_register(void)
/*
* Error: undo all operations in the reverse order
*/
+#ifdef CONFIG_TF_ZEBRA
+ctrl_failed:
+#endif
+#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
+ tf_self_test_post_exit();
+post_failed:
+#endif
init_failed:
cdev_del(&dev->cdev);
cdev_add_failed:
@@ -322,8 +441,19 @@ cdev_add_failed:
register_chrdev_region_failed:
unregister_syscore_ops((struct syscore_ops *)&g_tf_syscore_ops);
kobject_init_and_add_failed:
- kobject_del(&g_tf_dev.stats.kobj);
-
+ kobject_del(&g_tf_dev.kobj);
+
+#if defined(MODULE) && defined(CONFIG_TF_ZEBRA)
+#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
+ tf_self_test_unregister_device();
+self_test_register_device_failed:
+ tf_crypto_hmac_module_exit();
+hmac_init_failed:
+#endif
+ tf_device_mshield_exit();
+mshield_init_failed:
+module_early_init_failed:
+#endif
dprintk(KERN_INFO "tf_device_register(): Failure (error %d)\n",
error);
return error;
@@ -349,19 +479,6 @@ static int tf_device_open(struct inode *inode, struct file *file)
goto error;
}
-#ifndef CONFIG_ANDROID
- /*
- * Check file flags. We only autthorize the O_RDWR access
- */
- if (file->f_flags != O_RDWR) {
- dprintk(KERN_ERR "tf_device_open(%p): "
- "Invalid access mode %u\n",
- file, file->f_flags);
- error = -EACCES;
- goto error;
- }
-#endif
-
/*
* Open a new connection.
*/
diff --git a/security/smc/tf_device_mshield.c b/security/smc/tf_device_mshield.c
index 17f1451..309e15e 100644
--- a/security/smc/tf_device_mshield.c
+++ b/security/smc/tf_device_mshield.c
@@ -1,5 +1,5 @@
/**
- * Copyright (c) 2010 Trusted Logic S.A.
+ * Copyright (c) 2011 Trusted Logic S.A.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -27,9 +27,7 @@
#include <linux/sysdev.h>
#include <linux/vmalloc.h>
#include <linux/signal.h>
-#ifdef CONFIG_ANDROID
#include <linux/device.h>
-#endif
#include <linux/init.h>
#include <linux/bootmem.h>
@@ -45,9 +43,7 @@
#define TF_PA_CTRL_START 0x1
#define TF_PA_CTRL_STOP 0x2
-#ifdef CONFIG_ANDROID
static struct class *tf_ctrl_class;
-#endif
#define TF_DEVICE_CTRL_BASE_NAME "tf_ctrl"
@@ -66,20 +62,37 @@ static int tf_ctrl_check_omap_type(void)
/* No need to do anything on a GP device */
switch (omap_type()) {
case OMAP2_DEVICE_TYPE_GP:
- dprintk(KERN_INFO "SMC: Running on a GP device\n");
+ dpr_info("SMC: Running on a GP device\n");
return 0;
case OMAP2_DEVICE_TYPE_EMU:
case OMAP2_DEVICE_TYPE_SEC:
/*case OMAP2_DEVICE_TYPE_TEST:*/
- dprintk(KERN_INFO "SMC: Running on a EMU or HS device\n");
+ dpr_info("SMC: Running on a EMU or HS device\n");
return 1;
default:
- printk(KERN_ERR "SMC: unknown omap type %x\n", omap_type());
+ pr_err("SMC: unknown omap type %x\n", omap_type());
return -EFAULT;
}
}
+/*----------------------------------------------------------------------------*/
+
+static int tf_ctrl_device_release(struct inode *inode, struct file *file)
+{
+ struct tf_connection *connection;
+
+ dpr_info("%s(%u:%u, %p)\n",
+ __func__, imajor(inode), iminor(inode), file);
+
+ connection = tf_conn_from_file(file);
+ tf_close(connection);
+
+ dpr_info("%s(%p): Success\n", __func__, file);
+ return 0;
+}
+
+/*----------------------------------------------------------------------------*/
#define IOCTL_TF_PA_CTRL _IOWR('z', 0xFF, struct tf_pa_ctrl)
@@ -88,29 +101,25 @@ static long tf_ctrl_device_ioctl(struct file *file, unsigned int ioctl_num,
{
int result = S_SUCCESS;
struct tf_pa_ctrl pa_ctrl;
- u8 *pa_buffer = NULL;
- u8 *conf_buffer = NULL;
struct tf_device *dev = tf_get_device();
- dprintk(KERN_INFO "tf_ctrl_device_ioctl(%p, %u, %p)\n",
- file, ioctl_num, (void *) ioctl_param);
+ dpr_info("%s(%p, %u, %p)\n",
+ __func__, file, ioctl_num, (void *) ioctl_param);
mutex_lock(&dev->dev_mutex);
if (ioctl_num != IOCTL_TF_PA_CTRL) {
- dprintk(KERN_ERR "tf_ctrl_device_ioctl(%p): "
- "ioctl number is invalid (%p)\n",
- file, (void *)ioctl_num);
+ dpr_err("%s(%p): ioctl number is invalid (%p)\n",
+ __func__, file, (void *)ioctl_num);
result = -EFAULT;
goto exit;
}
if ((ioctl_param & 0x3) != 0) {
- dprintk(KERN_ERR "tf_ctrl_device_ioctl(%p): "
- "ioctl command message pointer is not word "
+ dpr_err("%s(%p): ioctl command message pointer is not word "
"aligned (%p)\n",
- file, (void *)ioctl_param);
+ __func__, file, (void *)ioctl_param);
result = -EFAULT;
goto exit;
@@ -118,94 +127,88 @@ static long tf_ctrl_device_ioctl(struct file *file, unsigned int ioctl_num,
if (copy_from_user(&pa_ctrl, (struct tf_pa_ctrl *)ioctl_param,
sizeof(struct tf_pa_ctrl))) {
- dprintk(KERN_ERR "tf_ctrl_device_ioctl(%p): "
- "cannot access ioctl parameter (%p)\n",
- file, (void *)ioctl_param);
+ dpr_err("%s(%p): cannot access ioctl parameter (%p)\n",
+ __func__, file, (void *)ioctl_param);
result = -EFAULT;
goto exit;
}
switch (pa_ctrl.nPACommand) {
- case TF_PA_CTRL_START:
- dprintk(KERN_INFO "tf_ctrl_device_ioctl(%p): "
- "Start the SMC PA (%d bytes) with conf (%d bytes)\n",
- file, pa_ctrl.pa_size, pa_ctrl.conf_size);
-
- pa_buffer = (u8 *) internal_kmalloc(pa_ctrl.pa_size,
- GFP_KERNEL);
- if (pa_buffer == NULL) {
- dprintk(KERN_ERR "tf_ctrl_device_ioctl(%p): "
- "Out of memory for PA buffer\n", file);
+ case TF_PA_CTRL_START: {
+ struct tf_shmem_desc *shmem_desc = NULL;
+ u32 shared_mem_descriptors[TF_MAX_COARSE_PAGES];
+ u32 descriptor_count;
+ u32 offset;
+ struct tf_connection *connection;
- result = -ENOMEM;
- goto exit;
- }
+ dpr_info("%s(%p): Start the SMC PA (%d bytes) with conf "
+ "(%d bytes)\n",
+ __func__, file, pa_ctrl.pa_size, pa_ctrl.conf_size);
- if (copy_from_user(
- pa_buffer, pa_ctrl.pa_buffer, pa_ctrl.pa_size)) {
- dprintk(KERN_ERR "tf_ctrl_device_ioctl(%p): "
- "Cannot access PA buffer (%p)\n",
- file, (void *) pa_ctrl.pa_buffer);
+ connection = tf_conn_from_file(file);
- internal_kfree(pa_buffer);
-
- result = -EFAULT;
- goto exit;
+ if (dev->workspace_addr == 0) {
+ result = -ENOMEM;
+ goto start_exit;
}
- if (pa_ctrl.conf_size > 0) {
- conf_buffer = (u8 *) internal_kmalloc(
- pa_ctrl.conf_size, GFP_KERNEL);
- if (conf_buffer == NULL) {
- internal_kfree(pa_buffer);
-
- result = -ENOMEM;
- goto exit;
- }
-
- if (copy_from_user(conf_buffer,
- pa_ctrl.conf_buffer, pa_ctrl.conf_size)) {
- internal_kfree(pa_buffer);
- internal_kfree(conf_buffer);
+ result = tf_validate_shmem_and_flags(
+ (u32)pa_ctrl.conf_buffer,
+ pa_ctrl.conf_size,
+ TF_SHMEM_TYPE_READ);
+ if (result != 0)
+ goto start_exit;
+
+ offset = 0;
+ result = tf_map_shmem(
+ connection,
+ (u32)pa_ctrl.conf_buffer,
+ TF_SHMEM_TYPE_READ,
+ true, /* in user space */
+ shared_mem_descriptors,
+ &offset,
+ pa_ctrl.conf_size,
+ &shmem_desc,
+ &descriptor_count);
+ if (result != 0)
+ goto start_exit;
+
+ if (descriptor_count > 1) {
+ dpr_err("%s(%p): configuration file is too long (%d)\n",
+ __func__, file, descriptor_count);
- result = -EFAULT;
- goto exit;
- }
- }
-
- if (dev->workspace_addr == 0) {
result = -ENOMEM;
- goto exit;
+ goto start_exit;
}
result = tf_start(&dev->sm,
dev->workspace_addr,
dev->workspace_size,
- pa_buffer,
+ pa_ctrl.pa_buffer,
pa_ctrl.pa_size,
- conf_buffer,
+ shared_mem_descriptors[0],
+ offset,
pa_ctrl.conf_size);
if (result)
- dprintk(KERN_ERR "SMC: start failed\n");
+ dpr_err("SMC: start failed\n");
else
- dprintk(KERN_INFO "SMC: started\n");
+ dpr_info("SMC: started\n");
- internal_kfree(pa_buffer);
- internal_kfree(conf_buffer);
+start_exit:
+ tf_unmap_shmem(connection, shmem_desc, true); /* full cleanup */
break;
+ }
case TF_PA_CTRL_STOP:
- dprintk(KERN_INFO "tf_ctrl_device_ioctl(%p): "
- "Stop the SMC PA\n", file);
+ dpr_info("%s(%p): Stop the SMC PA\n", __func__, file);
result = tf_power_management(&dev->sm,
TF_POWER_OPERATION_SHUTDOWN);
if (result)
- dprintk(KERN_WARNING "SMC: stop failed [0x%x]\n",
- result);
+ dpr_err("SMC: stop failed [0x%x]\n", result);
else
- dprintk(KERN_INFO "SMC: stopped\n");
+ dpr_info("SMC: stopped\n");
break;
default:
@@ -223,55 +226,53 @@ exit:
static int tf_ctrl_device_open(struct inode *inode, struct file *file)
{
int error;
+ struct tf_connection *connection = NULL;
- dprintk(KERN_INFO "tf_ctrl_device_open(%u:%u, %p)\n",
- imajor(inode), iminor(inode), file);
+ dpr_info("%s(%u:%u, %p)\n",
+ __func__, imajor(inode), iminor(inode), file);
/* Dummy lseek for non-seekable driver */
error = nonseekable_open(inode, file);
if (error != 0) {
- dprintk(KERN_ERR "tf_ctrl_device_open(%p): "
+ dpr_err("%s(%p): "
"nonseekable_open failed (error %d)!\n",
- file, error);
+ __func__, file, error);
goto error;
}
-#ifndef CONFIG_ANDROID
- /*
- * Check file flags. We only autthorize the O_RDWR access
- */
- if (file->f_flags != O_RDWR) {
- dprintk(KERN_ERR "tf_ctrl_device_open(%p): "
- "Invalid access mode %u\n",
- file, file->f_flags);
- error = -EACCES;
- goto error;
- }
-#endif
-
error = tf_ctrl_check_omap_type();
if (error <= 0)
return error;
+ error = tf_open(tf_get_device(), file, &connection);
+ if (error != 0) {
+ dpr_err("%s(%p): tf_open failed (error %d)!\n",
+ __func__, file, error);
+ goto error;
+ }
+
+ file->private_data = connection;
/*
* Successful completion.
*/
- dprintk(KERN_INFO "tf_ctrl_device_open(%p): Success\n", file);
+ dpr_info("%s(%p): Success\n", __func__, file);
return 0;
/*
* Error handling.
*/
error:
- dprintk(KERN_INFO "tf_ctrl_device_open(%p): Failure (error %d)\n",
- file, error);
+ tf_close(connection);
+ dpr_info("%s(%p): Failure (error %d)\n",
+ __func__, file, error);
return error;
}
static const struct file_operations g_tf_ctrl_device_file_ops = {
.owner = THIS_MODULE,
.open = tf_ctrl_device_open,
+ .release = tf_ctrl_device_release,
.unlocked_ioctl = tf_ctrl_device_ioctl,
.llseek = no_llseek,
};
@@ -297,12 +298,10 @@ int __init tf_ctrl_device_register(void)
return error;
}
-#ifdef CONFIG_ANDROID
tf_ctrl_class = class_create(THIS_MODULE, TF_DEVICE_CTRL_BASE_NAME);
device_create(tf_ctrl_class, NULL,
dev->dev_number + 1,
NULL, TF_DEVICE_CTRL_BASE_NAME);
-#endif
mutex_init(&dev->dev_mutex);
@@ -316,6 +315,8 @@ void __init tf_allocate_workspace(void)
{
struct tf_device *dev = tf_get_device();
+ tf_clock_timer_init();
+
if (tf_ctrl_check_omap_type() <= 0)
return;
@@ -348,4 +349,34 @@ static int __init tf_mem_setup(char *str)
return 0;
}
+#ifdef MODULE
+/*
+ * Module build: the "smc_mem" setting is passed in explicitly by the
+ * module entry point instead of being parsed as an early_param.
+ * Forwards it to tf_mem_setup() and reserves the secure workspace.
+ */
+int __init tf_device_mshield_init(char *smc_mem)
+{
+ if (smc_mem != NULL)
+ tf_mem_setup(smc_mem);
+ tf_allocate_workspace();
+ return 0;
+}
+
+/*
+ * Module unload: destroy the control device node (class + cdev +
+ * chrdev region) under dev_mutex, then forget the workspace range.
+ */
+void __exit tf_device_mshield_exit(void)
+{
+ struct tf_device *dev = tf_get_device();
+ if (dev == NULL)
+ return;
+
+ mutex_lock(&dev->dev_mutex);
+ if (tf_ctrl_class != NULL) {
+ device_destroy(tf_ctrl_class, dev->dev_number + 1);
+ class_destroy(tf_ctrl_class);
+ tf_ctrl_class = NULL;
+ }
+ cdev_del(&(dev->cdev_ctrl));
+ unregister_chrdev_region(dev->dev_number + 1, 1);
+ mutex_unlock(&dev->dev_mutex);
+
+ dev->workspace_size = 0;
+ dev->workspace_addr = 0;
+}
+#else
+/* Built-in build: take the carveout location from the kernel command
+ * line ("smc_mem=..."). */
 early_param("smc_mem", tf_mem_setup);
+#endif
diff --git a/security/smc/tf_dma.c b/security/smc/tf_dma.c
index a424dbb..ec1b1fa 100644
--- a/security/smc/tf_dma.c
+++ b/security/smc/tf_dma.c
@@ -99,6 +99,8 @@ void tf_dma_set_channel_common_params(
dma_channel->src_fi = 0;
dma_channel->dst_ei = 0;
dma_channel->dst_fi = 0;
+ dma_channel->read_prio = 0;
+ dma_channel->write_prio = 0;
dma_channel->sync_mode = OMAP_DMA_SYNC_FRAME;
dma_channel->src_start = src_start;
dma_channel->dst_start = dst_start;
diff --git a/security/smc/tf_protocol.h b/security/smc/tf_protocol.h
index e3e6485..cf9e68f 100644
--- a/security/smc/tf_protocol.h
+++ b/security/smc/tf_protocol.h
@@ -191,7 +191,12 @@ union tf_answer_param {
* Descriptor tables capacity
*/
#define TF_MAX_W3B_COARSE_PAGES (2)
-#define TF_MAX_COARSE_PAGES (8)
+/* TF_MAX_COARSE_PAGES is the number of level 1 descriptors (describing
+ * 1MB each) that can be shared with the secure world in a single registered
+ * shared memory block. It must be kept in synch with
+ * SCHANNEL6_MAX_DESCRIPTORS_PER_REGISTERED_SHARED_MEM in the SChannel
+ * protocol spec. */
+#define TF_MAX_COARSE_PAGES 128
#define TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT (8)
#define TF_DESCRIPTOR_TABLE_CAPACITY \
(1 << TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT)
@@ -619,11 +624,27 @@ union tf_answer {
/* Structure of the Communication Buffer */
struct tf_l1_shared_buffer {
+ #ifdef CONFIG_TF_ZEBRA
+ u32 exit_code;
+ u32 l1_shared_buffer_descr;
+ u32 backing_store_addr;
+ u32 backext_storage_addr;
+ u32 workspace_addr;
+ u32 workspace_size;
+ u32 conf_descriptor;
+ u32 conf_size;
+ u32 conf_offset;
+ u32 protocol_version;
+ u32 rpc_command;
+ u32 rpc_status;
+ u8 reserved1[16];
+ #else
u32 config_flag_s;
u32 w3b_size_max_s;
u32 reserved0;
u32 w3b_size_current_s;
u8 reserved1[48];
+ #endif
u8 version_description[TF_DESCRIPTION_BUFFER_LENGTH];
u32 status_s;
u32 reserved2;
diff --git a/security/smc/tf_self_test_device.c b/security/smc/tf_self_test_device.c
new file mode 100644
index 0000000..c9ed463
--- /dev/null
+++ b/security/smc/tf_self_test_device.c
@@ -0,0 +1,508 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/cdev.h>
+#include <linux/crypto.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+
+#include "tf_crypto.h"
+#include "tf_defs.h"
+#include "tf_util.h"
+#include "tf_self_test_io.h"
+
+struct tf_self_test_hash_data {
+ struct hash_desc desc;
+ void *buf;
+ unsigned buffer_size;
+ unsigned digestsize;
+ void *key;
+};
+struct tf_self_test_blkcipher_data {
+ struct blkcipher_desc desc;
+ void *key;
+ bool decrypt; /*false => encrypt, true => decrypt*/
+};
+
+struct tf_self_test_file_data {
+ unsigned type;
+ struct scatterlist sg[2];
+ union {
+ struct tf_self_test_hash_data hash;
+ struct tf_self_test_blkcipher_data cipher;
+ };
+};
+
+/*
+ * Free whatever crypto state this connection holds (hash or block-cipher
+ * transform, staging buffer, key copy, as selected by data->type) and
+ * zero *data so the file can be reused for a new operation.
+ */
+static void tf_self_test_finalize(struct tf_self_test_file_data *data)
+{
+ switch (data->type & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_HASH:
+ case CRYPTO_ALG_TYPE_SHASH:
+ case CRYPTO_ALG_TYPE_AHASH:
+ if (!IS_ERR_OR_NULL(data->hash.buf))
+ kfree(data->hash.buf);
+ if (!IS_ERR_OR_NULL(data->hash.desc.tfm))
+ crypto_free_hash(data->hash.desc.tfm);
+ if (!IS_ERR_OR_NULL(data->hash.key))
+ kfree(data->hash.key);
+ break;
+ case CRYPTO_ALG_TYPE_BLKCIPHER:
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ if (!IS_ERR_OR_NULL(data->cipher.desc.tfm))
+ crypto_free_blkcipher(data->cipher.desc.tfm);
+ if (!IS_ERR_OR_NULL(data->cipher.key))
+ kfree(data->cipher.key);
+ break;
+ }
+ /* Clearing the whole struct also resets data->type to 0 (= idle). */
+ memset(data, 0, sizeof(*data));
+}
+
+/*
+ * Open handler: allocate the per-open state. type == 0 means "no
+ * operation selected yet" (an init ioctl must run first).
+ */
+static int tf_self_test_device_open(struct inode *inode, struct file *filp)
+{
+ struct tf_self_test_file_data *data;
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+ data->type = 0;
+ filp->private_data = data;
+ return 0;
+}
+
+/* Release handler: drop any crypto state, then the per-open struct. */
+static int tf_self_test_device_release(struct inode *inode, struct file *filp)
+{
+ tf_self_test_finalize(filp->private_data);
+ kfree(filp->private_data);
+ return 0;
+}
+
+/*
+ * IOCTL_TF_SELF_TEST_HASH_INIT: set up a hash (or HMAC, when a key is
+ * supplied) operation on this connection. Copies the algorithm name
+ * and optional key from user space, allocates the transform and a
+ * one-page staging buffer, and records the crypto type in data->type.
+ * Returns 0 on success or a negative errno; on failure the connection
+ * state is finalized.
+ */
+static long tf_self_test_ioctl_hash_init(struct file *filp,
+ void __user *user_params)
+{
+ struct tf_self_test_ioctl_param_hash_init params;
+ struct tf_self_test_file_data *data = filp->private_data;
+ void *ptr;
+ size_t size;
+ int ret;
+ char alg_name[CRYPTO_MAX_ALG_NAME] = "?";
+ INFO("invoked");
+
+ /* Reset the connection data. */
+ if (data->type != 0)
+ tf_self_test_finalize(data);
+
+ /* Copy parameters from user space */
+ if (copy_from_user(&params, user_params, sizeof(params)))
+ return -EFAULT;
+ ret = strncpy_from_user(alg_name, params.alg_name, sizeof(alg_name));
+ if (ret < 0)
+ return -EFAULT;
+ else if (ret >= sizeof(alg_name))
+ return -ENAMETOOLONG;
+ INFO("alg_name=%s", alg_name);
+
+ /* Prepare for hashing */
+ data->hash.desc.tfm = crypto_alloc_hash(alg_name, 0, 0);
+ if (IS_ERR_OR_NULL(data->hash.desc.tfm)) {
+ /* Fix: '(int)tfm' alone turned a NULL tfm into ret == 0,
+ * reporting success for a torn-down connection. Use the
+ * same pattern as tf_self_test_digest(). */
+ ret = (data->hash.desc.tfm == NULL ?
+ -ENOMEM : (int)data->hash.desc.tfm);
+ goto abort;
+ }
+ data->type = crypto_tfm_alg_type(&data->hash.desc.tfm->base);
+ data->hash.digestsize = crypto_hash_digestsize(data->hash.desc.tfm);
+
+ /* Set the key if provided (HMAC); plain hashes pass key == NULL. */
+ if (params.key != NULL) {
+ u8 key[128];
+ if (params.key_size > sizeof(key)) {
+ ret = -EINVAL;
+ goto abort;
+ }
+ if (copy_from_user(key, params.key, params.key_size)) {
+ ret = -EFAULT;
+ goto abort;
+ }
+ TF_TRACE_ARRAY(key, params.key_size);
+ if (crypto_hash_setkey(data->hash.desc.tfm,
+ key, params.key_size)) {
+ ret = -EIO;
+ goto abort;
+ }
+ INFO("crypto_hash_setkey ok");
+ }
+ if (crypto_hash_init(&data->hash.desc)) {
+ ret = -EIO;
+ goto abort;
+ }
+ INFO("crypto_hash_init(%s) ok", alg_name);
+
+ /* Allocate a staging buffer for the data or the result */
+ size = PAGE_SIZE;
+ BUG_ON(size < data->hash.digestsize);
+ ptr = kzalloc(size, GFP_KERNEL);
+ if (IS_ERR_OR_NULL(ptr)) {
+ ret = -ENOMEM;
+ goto abort;
+ }
+ data->hash.buf = ptr;
+ data->hash.buffer_size = size;
+ INFO("allocated a buffer of %zu bytes", size);
+
+ /* Success */
+ return 0;
+
+abort:
+ ERROR("alg_name=%s", alg_name);
+ tf_self_test_finalize(data);
+ return ret;
+}
+
+/*
+ * IOCTL_TF_SELF_TEST_BLKCIPHER_INIT: set up a block-cipher operation.
+ * Copies the algorithm name, mandatory key and optional IV from user
+ * space; params.decrypt selects the direction used later by the
+ * update ioctl. Returns 0 or a negative errno; on failure the
+ * connection state is finalized.
+ */
+static long tf_self_test_ioctl_blkcipher_init(struct file *filp,
+ void __user *user_params)
+{
+ struct tf_self_test_ioctl_param_blkcipher_init params;
+ struct tf_self_test_file_data *data = filp->private_data;
+ int ret;
+ char alg_name[CRYPTO_MAX_ALG_NAME] = "?";
+
+ /* Reset the connection data. */
+ if (data->type != 0)
+ tf_self_test_finalize(data);
+
+ /* Copy parameters from user space */
+ if (copy_from_user(&params, user_params, sizeof(params)))
+ return -EFAULT;
+ ret = strncpy_from_user(alg_name, params.alg_name, sizeof(alg_name));
+ if (ret < 0)
+ return -EFAULT;
+ else if (ret >= sizeof(alg_name))
+ return -ENAMETOOLONG;
+ data->cipher.decrypt = params.decrypt;
+
+ /* Prepare for encryption/decryption */
+ data->cipher.desc.tfm = crypto_alloc_blkcipher(alg_name, 0, 0);
+ if (IS_ERR_OR_NULL(data->cipher.desc.tfm)) {
+ /* Fix: '(int)tfm' alone turned a NULL tfm into ret == 0
+ * (false success). */
+ ret = (data->cipher.desc.tfm == NULL ?
+ -ENOMEM : (int)data->cipher.desc.tfm);
+ goto abort;
+ }
+ data->type = crypto_tfm_alg_type(&data->cipher.desc.tfm->base);
+ INFO("crypto_alloc_blkcipher(%s) ok", alg_name);
+
+ /* Set the key if provided */
+ if (params.key != NULL) {
+ u8 key[128];
+ if (params.key_size > sizeof(key)) {
+ ret = -EINVAL;
+ goto abort;
+ }
+ if (copy_from_user(key, params.key, params.key_size)) {
+ ret = -EFAULT;
+ goto abort;
+ }
+ TF_TRACE_ARRAY(key, params.key_size);
+ if (crypto_blkcipher_setkey(data->cipher.desc.tfm,
+ key, params.key_size)) {
+ ret = -EIO;
+ goto abort;
+ }
+ INFO("crypto_blkcipher_setkey ok");
+ } else {
+ /*A key is required for ciphers*/
+ ret = -EINVAL;
+ goto abort;
+ }
+
+ /* Set the IV if provided; copy at most the transform's IV size. */
+ if (params.iv != NULL) {
+ unsigned char *iv =
+ crypto_blkcipher_crt(data->cipher.desc.tfm)->iv;
+ unsigned size = crypto_blkcipher_ivsize(data->cipher.desc.tfm);
+ if (size != params.iv_size)
+ WARNING("size=%u iv_size=%u", size, params.iv_size);
+ if (size > params.iv_size)
+ size = params.iv_size;
+ if (copy_from_user(iv, params.iv, size)) {
+ ret = -EFAULT;
+ goto abort;
+ }
+ TF_TRACE_ARRAY(iv, params.iv_size);
+ }
+
+ /* Success */
+ return 0;
+
+abort:
+ ERROR("alg_name=%s", alg_name);
+ tf_self_test_finalize(data);
+ return ret;
+}
+
+/*
+ * IOCTL_TF_SELF_TEST_BLKCIPHER_UPDATE: run one encrypt/decrypt pass of
+ * at most 256 bytes through the cipher set up by the init ioctl, via
+ * fixed-size kernel staging buffers. Requires a block-cipher
+ * connection (-EINVAL otherwise); inputs larger than the staging
+ * buffers yield -ENOSPC.
+ */
+static long tf_self_test_ioctl_blkcipher_update(struct file *filp,
+ void __user *user_params)
+{
+ struct tf_self_test_ioctl_param_blkcipher_update params;
+ struct tf_self_test_file_data *data = filp->private_data;
+ struct scatterlist sg_in, sg_out;
+ unsigned char in[256] = {0};
+ unsigned char out[sizeof(in)] = "Uninitialized!";
+ struct blkcipher_tfm *crt;
+ int (*crypt)(struct blkcipher_desc *, struct scatterlist *,
+ struct scatterlist *, unsigned int);
+
+
+ switch (data->type & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_BLKCIPHER:
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ break;
+ default:
+ return -EINVAL;
+ break;
+ }
+
+ /* Copy parameters from user space */
+ if (copy_from_user(&params, user_params, sizeof(params)))
+ return -EFAULT;
+ if (params.in == NULL || params.out == NULL)
+ return -EINVAL;
+ if (params.size > sizeof(in) || params.size > sizeof(out))
+ return -ENOSPC;
+ if (copy_from_user(in, params.in, params.size))
+ return -EFAULT;
+
+ /* Perform the encryption or decryption; the direction was chosen
+ * at init time (data->cipher.decrypt). */
+ sg_init_one(&sg_in, in, params.size);
+ sg_init_one(&sg_out, out, params.size);
+ crt = crypto_blkcipher_crt(data->cipher.desc.tfm);
+ data->cipher.desc.info = crt->iv;
+ crypt = (data->cipher.decrypt ? crt->decrypt : crt->encrypt);
+ if (crypt(&data->cipher.desc, &sg_out, &sg_in, params.size))
+ return -EIO;
+
+ /* Copy the result */
+ if (copy_to_user(params.out, out, params.size))
+ return -EFAULT;
+
+ /* Success */
+ return 0;
+}
+
+/*
+ * Ioctl dispatcher for the self-test device: POST runs the built-in
+ * test vectors; the other commands delegate to their init/update
+ * handlers. Unknown commands get -ENOTTY per convention.
+ */
+static long tf_self_test_device_ioctl(
+ struct file *filp, unsigned int ioctl_num,
+ unsigned long ioctl_param)
+{
+ switch (ioctl_num) {
+ case IOCTL_TF_SELF_TEST_POST:
+ return tf_self_test_post_vectors();
+ case IOCTL_TF_SELF_TEST_HASH_INIT:
+ return tf_self_test_ioctl_hash_init(filp, (void *)ioctl_param);
+ break;
+ case IOCTL_TF_SELF_TEST_BLKCIPHER_INIT:
+ return tf_self_test_ioctl_blkcipher_init(filp,
+ (void *)ioctl_param);
+ break;
+ case IOCTL_TF_SELF_TEST_BLKCIPHER_UPDATE:
+ return tf_self_test_ioctl_blkcipher_update(filp,
+ (void *)ioctl_param);
+ break;
+ default:
+ return -ENOTTY;
+ }
+}
+
+/*
+ * Read handler for hash connections. The first read after the writes
+ * finalizes the digest (freeing the transform and rewinding *pos);
+ * subsequent reads return successive chunks of the stored digest and
+ * finally 0 (EOF). Non-hash connections get -ENXIO.
+ */
+static ssize_t tf_self_test_device_read(struct file *filp,
+ char __user *buf, size_t count,
+ loff_t *pos)
+{
+ struct tf_self_test_file_data *data = filp->private_data;
+ INFO("type=%03x tfm=%d pos=%lu",
+ data->type, data->hash.desc.tfm != NULL, (unsigned long)*pos);
+ switch (data->type & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_HASH:
+ case CRYPTO_ALG_TYPE_SHASH:
+ case CRYPTO_ALG_TYPE_AHASH:
+ if (data->hash.desc.tfm != NULL) {
+ /*Stop accepting input and calculate the hash*/
+ if (crypto_hash_final(&data->hash.desc,
+ data->hash.buf)) {
+ ERROR("crypto_hash_final failed");
+ return -EIO;
+ }
+ crypto_free_hash(data->hash.desc.tfm);
+ data->hash.desc.tfm = NULL;
+ {
+ unsigned char *buf = data->hash.buf;
+ TF_TRACE_ARRAY(buf, data->hash.digestsize);
+ }
+ /*Reset the file position for the read part*/
+ *pos = 0;
+ }
+ if (*pos < 0 || *pos >= data->hash.digestsize)
+ return 0;
+ if (*pos + count > data->hash.digestsize)
+ count = data->hash.digestsize - *pos;
+ /* Fix: copy from the current file position, not from the
+ * start of the digest, so partial reads see successive
+ * bytes instead of the first bytes repeated. */
+ if (copy_to_user(buf,
+ (unsigned char *)data->hash.buf + *pos,
+ count))
+ return -EFAULT;
+ *pos += count;
+ break;
+ default:
+ return -ENXIO;
+ break;
+ }
+ return count;
+}
+
+/*
+ * Write handler for hash connections: stage up to buffer_size bytes in
+ * the kernel buffer and feed them to crypto_hash_update(). Writes are
+ * refused with -EROFS once a read has finalized the digest (tfm
+ * freed), and with -ENXIO for non-hash connections.
+ */
+static ssize_t tf_self_test_device_write(struct file *filp,
+ const char __user *buf, size_t count,
+ loff_t *pos)
+{
+ struct tf_self_test_file_data *data = filp->private_data;
+ INFO("type=%03x tfm=%d pos=%lu",
+ data->type, data->hash.desc.tfm != NULL, (unsigned long)*pos);
+ switch (data->type & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_HASH:
+ case CRYPTO_ALG_TYPE_SHASH:
+ case CRYPTO_ALG_TYPE_AHASH:
+ if (IS_ERR_OR_NULL(data->hash.desc.tfm)) {
+ /*We are no longer accepting input*/
+ return -EROFS;
+ }
+ /* Short writes are fine: the caller sees the truncated
+ * count and retries with the remainder. */
+ if (count > data->hash.buffer_size)
+ count = data->hash.buffer_size;
+ if (copy_from_user(data->hash.buf, buf, count))
+ return -EFAULT;
+ TF_TRACE_ARRAY(buf, count);
+ {
+ unsigned char *ptr = data->hash.buf;
+ TF_TRACE_ARRAY(ptr, count);
+ }
+ sg_init_one(&data->sg[0], data->hash.buf, count);
+ if (crypto_hash_update(&data->hash.desc,
+ &data->sg[0], count)) {
+ ERROR("crypto_hash_update failed");
+ tf_self_test_finalize(data);
+ return -EIO;
+ }
+ *pos += count;
+ break;
+ default:
+ return -ENXIO;
+ break;
+ }
+ return count;
+}
+
+
+
+/* File operations for the /dev self-test node. */
+static const struct file_operations tf_self_test_device_file_ops = {
+ .owner = THIS_MODULE,
+ .open = tf_self_test_device_open,
+ .release = tf_self_test_device_release,
+ .unlocked_ioctl = tf_self_test_device_ioctl,
+ .read = tf_self_test_device_read,
+ .write = tf_self_test_device_write,
+ .llseek = no_llseek,
+};
+
+/* Singleton bundling the device number, cdev and sysfs class/device
+ * created by tf_self_test_register_device(). */
+struct tf_self_test_device {
+ dev_t devno;
+ struct cdev cdev;
+ struct class *class;
+ struct device *device;
+};
+static struct tf_self_test_device tf_self_test_device;
+
+/*
+ * Create the self-test character device (fixed minor on the SMC
+ * driver's major) plus its sysfs class and device node. Returns 0 on
+ * success or a negative errno; each failure path unwinds everything
+ * acquired before it.
+ */
+int __init tf_self_test_register_device(void)
+{
+ struct tf_device *tf_device = tf_get_device();
+ int error;
+
+ tf_self_test_device.devno =
+ MKDEV(MAJOR(tf_device->dev_number),
+ TF_SELF_TEST_DEVICE_MINOR_NUMBER);
+ error = register_chrdev_region(tf_self_test_device.devno, 1,
+ TF_SELF_TEST_DEVICE_BASE_NAME);
+ if (error != 0) {
+ ERROR("register_chrdev_region failed (error %d)", error);
+ goto register_chrdev_region_failed;
+ }
+ cdev_init(&tf_self_test_device.cdev, &tf_self_test_device_file_ops);
+ tf_self_test_device.cdev.owner = THIS_MODULE;
+ error = cdev_add(&tf_self_test_device.cdev,
+ tf_self_test_device.devno, 1);
+ if (error != 0) {
+ ERROR("cdev_add failed (error %d)", error);
+ goto cdev_add_failed;
+ }
+ tf_self_test_device.class =
+ class_create(THIS_MODULE, TF_SELF_TEST_DEVICE_BASE_NAME);
+ if (IS_ERR_OR_NULL(tf_self_test_device.class)) {
+ ERROR("class_create failed (%d)",
+ (int)tf_self_test_device.class);
+ /* Fix: 'error' still held 0 (from cdev_add) here, so the
+ * function reported success after tearing everything
+ * down. Propagate a failure code instead. */
+ error = (tf_self_test_device.class == NULL ?
+ -ENOMEM : (int)tf_self_test_device.class);
+ goto class_create_failed;
+ }
+ tf_self_test_device.device =
+ device_create(tf_self_test_device.class, NULL,
+ tf_self_test_device.devno,
+ NULL, TF_SELF_TEST_DEVICE_BASE_NAME);
+ INFO("created device %s = %u:%u",
+ TF_SELF_TEST_DEVICE_BASE_NAME,
+ MAJOR(tf_self_test_device.devno),
+ MINOR(tf_self_test_device.devno));
+ if (IS_ERR_OR_NULL(tf_self_test_device.device)) {
+ ERROR("device_create failed (%d)",
+ (int)tf_self_test_device.device);
+ /* Fix: same false-success defect as above. */
+ error = (tf_self_test_device.device == NULL ?
+ -ENOMEM : (int)tf_self_test_device.device);
+ goto device_create_failed;
+ }
+
+ return 0;
+
+ /*__builtin_unreachable();*/
+device_create_failed:
+ if (!IS_ERR_OR_NULL(tf_self_test_device.class)) {
+ device_destroy(tf_self_test_device.class,
+ tf_self_test_device.devno);
+ class_destroy(tf_self_test_device.class);
+ }
+class_create_failed:
+ tf_self_test_device.class = NULL;
+ cdev_del(&tf_self_test_device.cdev);
+cdev_add_failed:
+ /* Re-init with NULL fops clears cdev.owner so that
+ * tf_self_test_unregister_device() skips cdev_del(). */
+ cdev_init(&tf_self_test_device.cdev, NULL);
+ unregister_chrdev_region(tf_self_test_device.devno, 1);
+register_chrdev_region_failed:
+ return error;
+}
+
+/*
+ * Undo tf_self_test_register_device(). Safe after a partial
+ * registration: class may be NULL/ERR, and the cdev.owner check skips
+ * cdev_del() when the cdev was re-inited with NULL fops on the
+ * register error path.
+ */
+void __exit tf_self_test_unregister_device(void)
+{
+ if (!IS_ERR_OR_NULL(tf_self_test_device.class)) {
+ device_destroy(tf_self_test_device.class,
+ tf_self_test_device.devno);
+ class_destroy(tf_self_test_device.class);
+ }
+ tf_self_test_device.class = NULL;
+ if (tf_self_test_device.cdev.owner == THIS_MODULE)
+ cdev_del(&tf_self_test_device.cdev);
+ unregister_chrdev_region(tf_self_test_device.devno, 1);
+}
diff --git a/security/smc/tf_self_test_io.h b/security/smc/tf_self_test_io.h
new file mode 100644
index 0000000..6282000
--- /dev/null
+++ b/security/smc/tf_self_test_io.h
@@ -0,0 +1,54 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+
+/* Device node name and fixed minor; the major comes from the SMC
+ * driver at registration time (TF_SELF_TEST_DEVICE_MAJOR_NUMBER is the
+ * nominal value). */
+#define TF_SELF_TEST_DEVICE_BASE_NAME "tf_crypto_test"
+#define TF_SELF_TEST_DEVICE_MAJOR_NUMBER 122
+#define TF_SELF_TEST_DEVICE_MINOR_NUMBER 4
+
+/* Run the built-in power-on self-test vectors; takes no argument. */
+#define IOCTL_TF_SELF_TEST_POST _IO('z', 'P')
+
+/* Start a hash/HMAC operation; argument is the struct below. */
+#define IOCTL_TF_SELF_TEST_HASH_INIT _IO('z', 'H')
+struct tf_self_test_ioctl_param_hash_init {
+ const char *alg_name;
+ unsigned char *key; /*NULL for a simple hash, the key for an HMAC*/
+ uint32_t key_size;
+};
+
+/* Start a block-cipher operation; argument is the struct below. */
+#define IOCTL_TF_SELF_TEST_BLKCIPHER_INIT _IO('z', 'B')
+struct tf_self_test_ioctl_param_blkcipher_init {
+ const char *alg_name;
+ unsigned char *key;
+ uint32_t key_size;
+ unsigned char *iv;
+ uint32_t iv_size;
+ uint32_t decrypt; /*0 => encrypt, 1 => decrypt*/
+};
+
+/* One encrypt/decrypt pass over a user buffer pair of 'size' bytes. */
+#define IOCTL_TF_SELF_TEST_BLKCIPHER_UPDATE _IO('z', 'U')
+struct tf_self_test_ioctl_param_blkcipher_update {
+ unsigned char *in;
+ unsigned char *out;
+ uint32_t size;
+};
diff --git a/security/smc/tf_self_test_post.c b/security/smc/tf_self_test_post.c
new file mode 100644
index 0000000..650bf17
--- /dev/null
+++ b/security/smc/tf_self_test_post.c
@@ -0,0 +1,707 @@
+/**
+ * Copyright (c) 2011 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/cdev.h>
+#include <linux/crypto.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/sysdev.h>
+
+#include "tf_crypto.h"
+#include "tf_defs.h"
+#include "tf_util.h"
+
+
+
+/*** Test vectors ***/
+
+struct digest_test_vector {
+ unsigned char *text;
+ unsigned length;
+ unsigned char *digest;
+ unsigned char *key; /*used for HMAC, NULL for plain digests*/
+ unsigned key_length;
+};
+
+struct blkcipher_test_vector {
+ unsigned char *key;
+ unsigned key_length;
+ unsigned char *iv;
+ unsigned char *plaintext;
+ unsigned char *ciphertext;
+ unsigned length;
+};
+
+/* From FIPS-180 */
+struct digest_test_vector sha1_test_vector = {
+ "abc",
+ 3,
+ "\xa9\x99\x3e\x36\x47\x06\x81\x6a\xba\x3e\x25\x71\x78\x50\xc2\x6c"
+ "\x9c\xd0\xd8\x9d"
+};
+struct digest_test_vector sha224_test_vector = {
+ "abc",
+ 3,
+ "\x23\x09\x7D\x22\x34\x05\xD8\x22\x86\x42\xA4\x77\xBD\xA2\x55\xB3"
+ "\x2A\xAD\xBC\xE4\xBD\xA0\xB3\xF7\xE3\x6C\x9D\xA7"
+};
+struct digest_test_vector sha256_test_vector = {
+ "abc",
+ 3,
+ "\xba\x78\x16\xbf\x8f\x01\xcf\xea\x41\x41\x40\xde\x5d\xae\x22\x23"
+ "\xb0\x03\x61\xa3\x96\x17\x7a\x9c\xb4\x10\xff\x61\xf2\x00\x15\xad"
+};
+
+/* From FIPS-198 */
+struct digest_test_vector hmac_sha1_test_vector = {
+ "Sample message for keylen<blocklen",
+ 34,
+ "\x4c\x99\xff\x0c\xb1\xb3\x1b\xd3\x3f\x84\x31\xdb\xaf\x4d\x17\xfc"
+ "\xd3\x56\xa8\x07",
+ "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13",
+ 20,
+};
+struct digest_test_vector hmac_sha224_test_vector = {
+ "Sample message for keylen<blocklen",
+ 34,
+ "\xe3\xd2\x49\xa8\xcf\xb6\x7e\xf8\xb7\xa1\x69\xe9\xa0\xa5\x99\x71"
+ "\x4a\x2c\xec\xba\x65\x99\x9a\x51\xbe\xb8\xfb\xbe",
+ "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b",
+ 28,
+};
+struct digest_test_vector hmac_sha256_test_vector = {
+ "Sample message for keylen<blocklen",
+ 34,
+ "\xa2\x8c\xf4\x31\x30\xee\x69\x6a\x98\xf1\x4a\x37\x67\x8b\x56\xbc"
+ "\xfc\xbd\xd9\xe5\xcf\x69\x71\x7f\xec\xf5\x48\x0f\x0e\xbd\xf7\x90",
+ "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ 32,
+};
+
+/* From FIPS-197 */
+struct blkcipher_test_vector aes_ecb_128_test_vector = {
+ "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ 16,
+ NULL,
+ "\x00\x11\x22\x33\x44\x55\x66\x77\x88\x99\xaa\xbb\xcc\xdd\xee\xff",
+ "\x69\xc4\xe0\xd8\x6a\x7b\x04\x30\xd8\xcd\xb7\x80\x70\xb4\xc5\x5a",
+ 16,
+};
+struct blkcipher_test_vector aes_ecb_192_test_vector = {
+ "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17",
+ 24,
+ NULL,
+ "\x00\x11\x22\x33\x44\x55\x66\x77\x88\x99\xaa\xbb\xcc\xdd\xee\xff",
+ "\xdd\xa9\x7c\xa4\x86\x4c\xdf\xe0\x6e\xaf\x70\xa0\xec\x0d\x71\x91",
+ 16,
+};
+struct blkcipher_test_vector aes_ecb_256_test_vector = {
+ "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ 32,
+ NULL,
+ "\x00\x11\x22\x33\x44\x55\x66\x77\x88\x99\xaa\xbb\xcc\xdd\xee\xff",
+ "\x8e\xa2\xb7\xca\x51\x67\x45\xbf\xea\xfc\x49\x90\x4b\x49\x60\x89",
+ 16,
+};
+
+/* From RFC 3602 */
+struct blkcipher_test_vector aes_cbc_128_test_vector = {
+ "\x06\xa9\x21\x40\x36\xb8\xa1\x5b\x51\x2e\x03\xd5\x34\x12\x00\x06",
+ 16,
+ "\x3d\xaf\xba\x42\x9d\x9e\xb4\x30\xb4\x22\xda\x80\x2c\x9f\xac\x41",
+ "Single block msg",
+ "\xe3\x53\x77\x9c\x10\x79\xae\xb8\x27\x08\x94\x2d\xbe\x77\x18\x1a",
+ 16
+};
+/* From NISP SP800-38A */
+struct blkcipher_test_vector aes_cbc_192_test_vector = {
+ "\x8e\x73\xb0\xf7\xda\x0e\x64\x52\xc8\x10\xf3\x2b\x80\x90\x79\xe5"
+ "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b",
+ 24,
+ "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+ "\x4f\x02\x1d\xb2\x43\xbc\x63\x3d\x71\x78\x18\x3a\x9f\xa0\x71\xe8"
+ "\xb4\xd9\xad\xa9\xad\x7d\xed\xf4\xe5\xe7\x38\x76\x3f\x69\x14\x5a"
+ "\x57\x1b\x24\x20\x12\xfb\x7a\xe0\x7f\xa9\xba\xac\x3d\xf1\x02\xe0"
+ "\x08\xb0\xe2\x79\x88\x59\x88\x81\xd9\x20\xa9\xe6\x4f\x56\x15\xcd",
+ 64
+};
+struct blkcipher_test_vector aes_cbc_256_test_vector = {
+ "\x60\x3d\xeb\x10\x15\xca\x71\xbe\x2b\x73\xae\xf0\x85\x7d\x77\x81"
+ "\x1f\x35\x2c\x07\x3b\x61\x08\xd7\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
+ 32,
+ "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+ "\xf5\x8c\x4c\x04\xd6\xe5\xf1\xba\x77\x9e\xab\xfb\x5f\x7b\xfb\xd6"
+ "\x9c\xfc\x4e\x96\x7e\xdb\x80\x8d\x67\x9f\x77\x7b\xc6\x70\x2c\x7d"
+ "\x39\xf2\x33\x69\xa9\xd9\xba\xcf\xa5\x30\xe2\x63\x04\x23\x14\x61"
+ "\xb2\xeb\x05\xe2\xc3\x9b\xe9\xfc\xda\x6c\x19\x07\x8c\x6a\x9d\x1b",
+ 64
+};
+
+
+
+/*** Helper functions ***/
+
+/*
+ * Run one digest (or HMAC, when tv->key is set) test vector through the
+ * named algorithm and compare against the expected digest. Returns 0
+ * on match, a negative errno otherwise; failures are also reported on
+ * the console as POST failures.
+ */
+static int tf_self_test_digest(const char *alg_name,
+ const struct digest_test_vector *tv)
+{
+ unsigned char digest[64];
+ unsigned char input[256];
+ struct scatterlist sg;
+ struct hash_desc desc = {NULL, 0};
+ int error;
+ size_t digest_length;
+
+ desc.tfm = crypto_alloc_hash(alg_name, 0, 0);
+ if (IS_ERR_OR_NULL(desc.tfm)) {
+ ERROR("crypto_alloc_hash(%s) failed", alg_name);
+ error = (desc.tfm == NULL ? -ENOMEM : (int)desc.tfm);
+ goto abort;
+ }
+
+ digest_length = crypto_hash_digestsize(desc.tfm);
+ /* Fix: digest_length is size_t, so trace it with %zu (the %u the
+ * ERROR path below already uses %zu). */
+ INFO("alg_name=%s driver_name=%s digest_length=%zu",
+ alg_name,
+ crypto_tfm_alg_driver_name(crypto_hash_tfm(desc.tfm)),
+ digest_length);
+ if (digest_length > sizeof(digest)) {
+ ERROR("digest length too large (%zu > %zu)",
+ digest_length, sizeof(digest));
+ error = -ENOMEM;
+ goto abort;
+ }
+
+ if (tv->key != NULL) {
+ error = crypto_hash_setkey(desc.tfm, tv->key, tv->key_length);
+ if (error) {
+ ERROR("crypto_hash_setkey(%s) failed: %d",
+ alg_name, error);
+ goto abort;
+ }
+ TF_TRACE_ARRAY(tv->key, tv->key_length);
+ }
+ error = crypto_hash_init(&desc);
+ if (error) {
+ ERROR("crypto_hash_init(%s) failed: %d", alg_name, error);
+ goto abort;
+ }
+
+ /* The test vector data is in vmalloc'ed memory since it's a module
+ global. Copy it to the stack, since the crypto API doesn't support
+ vmalloc'ed memory. */
+ if (tv->length > sizeof(input)) {
+ ERROR("data too large (%zu > %zu)",
+ tv->length, sizeof(input));
+ error = -ENOMEM;
+ goto abort;
+ }
+ memcpy(input, tv->text, tv->length);
+ INFO("sg_init_one(%p, %p, %u)", &sg, input, tv->length);
+ sg_init_one(&sg, input, tv->length);
+
+ TF_TRACE_ARRAY(input, tv->length);
+ error = crypto_hash_update(&desc, &sg, tv->length);
+ if (error) {
+ ERROR("crypto_hash_update(%s) failed: %d",
+ alg_name, error);
+ goto abort;
+ }
+
+ error = crypto_hash_final(&desc, digest);
+ if (error) {
+ ERROR("crypto_hash_final(%s) failed: %d", alg_name, error);
+ goto abort;
+ }
+
+ crypto_free_hash(desc.tfm);
+ desc.tfm = NULL;
+
+ if (memcmp(digest, tv->digest, digest_length)) {
+ TF_TRACE_ARRAY(digest, digest_length);
+ ERROR("wrong %s digest value", alg_name);
+ pr_err("[SMC Driver] error: SMC Driver POST FAILURE (%s)\n",
+ alg_name);
+ error = -EINVAL;
+ } else {
+ INFO("%s: digest successful", alg_name);
+ error = 0;
+ }
+ return error;
+
+abort:
+ if (!IS_ERR_OR_NULL(desc.tfm))
+ crypto_free_hash(desc.tfm);
+ pr_err("[SMC Driver] error: SMC Driver POST FAILURE (%s)\n", alg_name);
+ return error;
+}
+
+/*
+ * Run one block-cipher POST case: encrypt or decrypt the test vector with
+ * the SMC hardware-backed algorithm and compare the result against the
+ * expected output.
+ *
+ * @alg_name: crypto API algorithm name (e.g. "aes-cbc-smc")
+ * @tv:       test vector (key, optional IV, plaintext, ciphertext, length)
+ * @decrypt:  false = encrypt the plaintext, true = decrypt the ciphertext
+ *
+ * Returns 0 on success, a negative error code on failure.
+ */
+static int tf_self_test_perform_blkcipher(
+	const char *alg_name,
+	const struct blkcipher_test_vector *tv,
+	bool decrypt)
+{
+	struct blkcipher_desc desc = {0};
+	struct scatterlist sg_in, sg_out;
+	unsigned char *in = NULL;
+	unsigned char *out = NULL;
+	unsigned in_size, out_size;
+	int error;
+
+	desc.tfm = crypto_alloc_blkcipher(alg_name, 0, 0);
+	if (IS_ERR_OR_NULL(desc.tfm)) {
+		ERROR("crypto_alloc_blkcipher(%s) failed", alg_name);
+		error = (desc.tfm == NULL ? -ENOMEM : (int)desc.tfm);
+		goto abort;
+	}
+	INFO("%s alg_name=%s driver_name=%s key_size=%u block_size=%u",
+	     decrypt ? "decrypt" : "encrypt", alg_name,
+	     crypto_tfm_alg_driver_name(crypto_blkcipher_tfm(desc.tfm)),
+	     tv->key_length * 8,
+	     crypto_blkcipher_blocksize(desc.tfm));
+
+	/* Copy the input out of the test vector: module globals live in
+	   vmalloc'ed memory which the crypto API cannot map. */
+	in_size = tv->length;
+	in = kmalloc(in_size, GFP_KERNEL);
+	if (in == NULL) {
+		/* kmalloc returns NULL on failure, never an ERR_PTR. */
+		ERROR("kmalloc(%u) failed", in_size);
+		error = -ENOMEM;
+		goto abort;
+	}
+	memcpy(in, decrypt ? tv->ciphertext : tv->plaintext,
+	       tv->length);
+
+	/* One spare block so a padding cipher cannot overrun the buffer. */
+	out_size = tv->length + crypto_blkcipher_blocksize(desc.tfm);
+	out = kmalloc(out_size, GFP_KERNEL);
+	if (out == NULL) {
+		ERROR("kmalloc(%u) failed", out_size);
+		error = -ENOMEM;
+		goto abort;
+	}
+
+	error = crypto_blkcipher_setkey(desc.tfm, tv->key, tv->key_length);
+	if (error) {
+		/* FIX: message previously named "crypto_alloc_setkey". */
+		ERROR("crypto_blkcipher_setkey(%s) failed", alg_name);
+		goto abort;
+	}
+	TF_TRACE_ARRAY(tv->key, tv->key_length);
+	if (tv->iv != NULL) {
+		unsigned iv_length = crypto_blkcipher_ivsize(desc.tfm);
+		crypto_blkcipher_set_iv(desc.tfm, tv->iv, iv_length);
+		TF_TRACE_ARRAY(tv->iv, iv_length);
+	}
+
+	sg_init_one(&sg_in, in, tv->length);
+	sg_init_one(&sg_out, out, tv->length);
+	TF_TRACE_ARRAY(in, tv->length);
+	/* FIX: the return value used to be discarded, so the error check
+	   below tested the stale value from setkey and cipher failures
+	   went unnoticed. */
+	error = (decrypt ? crypto_blkcipher_decrypt : crypto_blkcipher_encrypt)
+		(&desc, &sg_out, &sg_in, tv->length);
+	if (error) {
+		ERROR("crypto_blkcipher_%s(%s) failed",
+		      decrypt ? "decrypt" : "encrypt", alg_name);
+		goto abort;
+	}
+	TF_TRACE_ARRAY(out, tv->length);
+
+	crypto_free_blkcipher(desc.tfm);
+
+	if (memcmp((decrypt ? tv->plaintext : tv->ciphertext),
+		   out, tv->length)) {
+		ERROR("Wrong %s/%u %s result", alg_name, tv->key_length * 8,
+		      decrypt ? "decryption" : "encryption");
+		error = -EINVAL;
+	} else {
+		INFO("%s/%u: %s successful", alg_name, tv->key_length * 8,
+		     decrypt ? "decryption" : "encryption");
+		error = 0;
+	}
+	kfree(in);
+	kfree(out);
+	return error;
+
+abort:
+	if (!IS_ERR_OR_NULL(desc.tfm))
+		crypto_free_blkcipher(desc.tfm);
+	/* kfree(NULL) is a no-op, so no guards are needed. */
+	kfree(out);
+	kfree(in);
+	return error;
+}
+
+/*
+ * Run both directions of the block-cipher POST for one algorithm/vector
+ * pair.  Both are always attempted so the log reports every failing
+ * direction, not just the first.
+ *
+ * Returns 0 if both pass, -EINVAL otherwise.
+ */
+static int tf_self_test_blkcipher(const char *alg_name,
+				  const struct blkcipher_test_vector *tv)
+{
+	int encryption_outcome =
+		tf_self_test_perform_blkcipher(alg_name, tv, false);
+	int decryption_outcome =
+		tf_self_test_perform_blkcipher(alg_name, tv, true);
+	if (encryption_outcome == 0 && decryption_outcome == 0) {
+		return 0;
+	} else {
+		/* FIX: message was missing its terminating newline. */
+		pr_err("[SMC Driver] error: SMC Driver POST FAILURE (%s/%u)\n",
+		       alg_name, tv->key_length * 8);
+		return -EINVAL;
+	}
+}
+
+
+
+/*** Integrity check ***/
+
+#if defined(CONFIG_MODULE_EXTRA_COPY) && defined(MODULE)
+
+/*
+ * Parse a hex string (digit pairs, optionally separated by '-' or ' ')
+ * into bytes.
+ *
+ * @buf:      output buffer
+ * @buf_size: capacity of @buf in bytes
+ * @hex:      NUL-terminated hex string
+ *
+ * Returns the number of bytes written, -EINVAL on an invalid character,
+ * or -ENOSPC when @buf is too small.  A trailing unpaired nibble is
+ * silently dropped (unchanged behavior).
+ */
+static ssize_t scan_hex(unsigned char *const buf,
+			size_t buf_size,
+			const char *const hex)
+{
+	size_t bi = 0;
+	size_t hi;
+	int prev = -1;	/* pending high nibble, or -1 if none */
+	unsigned cur;
+	for (hi = 0; hex[hi] != 0; hi++) {
+		if (hex[hi] >= '0' && hex[hi] <= '9')
+			cur = hex[hi] - '0';
+		else if (hex[hi] >= 'a' && hex[hi] <= 'f')
+			cur = hex[hi] - 'a' + 10;
+		else if (hex[hi] >= 'A' && hex[hi] <= 'F')
+			/* FIX: the range previously ended at 'f' and the
+			   subtraction used 'F', so every uppercase hex
+			   digit was decoded to a wrong value. */
+			cur = hex[hi] - 'A' + 10;
+		else if (hex[hi] == '-' || hex[hi] == ' ')
+			continue;
+		else {
+			ERROR("invalid character at %zu (%u)", hi, hex[hi]);
+			return -EINVAL;
+		}
+		if (prev == -1)
+			prev = cur;
+		else {
+			if (bi >= buf_size) {
+				ERROR("buffer too large at %zu", hi);
+				return -ENOSPC;
+			}
+			buf[bi++] = prev << 4 | cur;
+			prev = -1;
+		}
+	}
+	return bi;
+}
+
+/* Allocate a scatterlist for a vmalloc block. The scatterlist is allocated
+   with kmalloc. Buffers of arbitrary alignment are supported.
+   This function is derived from other vmalloc_to_sg functions in the kernel
+   tree, but note that its second argument is a size in bytes, not in pages.
+   Returns NULL on allocation failure or when any page of the buffer cannot
+   be resolved; the caller releases the returned array with kfree().
+ */
+static struct scatterlist *vmalloc_to_sg(unsigned char *const buf,
+	size_t const bytes)
+{
+	struct scatterlist *sg_array = NULL;
+	struct page *pg;
+	/* Allow non-page-aligned pointers, so the first and last page may
+	   both be partial. */
+	unsigned const page_count = bytes / PAGE_SIZE + 2;
+	unsigned char *ptr;
+	unsigned i;
+
+	sg_array = kcalloc(page_count, sizeof(*sg_array), GFP_KERNEL);
+	if (sg_array == NULL)
+		goto abort;
+	sg_init_table(sg_array, page_count);
+	/* Walk page by page starting from the page containing buf; the
+	   loop runs at most page_count times by construction. */
+	for (i = 0, ptr = (void *)((unsigned long)buf & PAGE_MASK);
+	     ptr < buf + bytes;
+	     i++, ptr += PAGE_SIZE) {
+		pg = vmalloc_to_page(ptr);
+		if (pg == NULL)
+			goto abort;
+		sg_set_page(&sg_array[i], pg, PAGE_SIZE, 0);
+	}
+	/* Rectify the first page which may be partial. The last page may
+	   also be partial but its offset is correct so it doesn't matter.
+	   NOTE(review): for a buffer shorter than the remainder of its
+	   first page this length overshoots `bytes`; the caller here
+	   bounds the hash by an explicit byte count so it appears benign,
+	   but confirm before reusing this helper elsewhere. */
+	sg_array[0].offset = offset_in_page(buf);
+	sg_array[0].length = PAGE_SIZE - offset_in_page(buf);
+	return sg_array;
+abort:
+	if (sg_array != NULL)
+		kfree(sg_array);
+	return NULL;
+}
+
+/* HMAC-SHA-256 key for the module integrity check.  The key is compiled
+   into the module; the expected MAC over the module image is supplied
+   externally (see the hmac_sha256 parameter handling in
+   tf_self_test_integrity). */
+static unsigned char tf_integrity_hmac_sha256_key[] = {
+	0x6c, 0x99, 0x2c, 0x8a, 0x26, 0x98, 0xd1, 0x09,
+	0x5c, 0x18, 0x20, 0x42, 0x51, 0xaf, 0xf7, 0xad,
+	0x6b, 0x42, 0xfb, 0x1d, 0x4b, 0x44, 0xfa, 0xcc,
+	0x37, 0x7b, 0x05, 0x6d, 0x57, 0x24, 0x5f, 0x46,
+};
+
+/*
+ * Verify the integrity of the loaded module: compute an HMAC (with the
+ * built-in key) over the module's raw binary image and compare it with
+ * the expected value parsed from tf_integrity_hmac_sha256_expected_value.
+ *
+ * Returns 0 on success, -ENXIO when no raw binary image is available,
+ * -ENOENT when no expected value was supplied, -EINVAL on a MAC
+ * mismatch, -ENOMEM on allocation failure, and -EIO for other failures
+ * (the abort path maps everything except -ENOMEM to -EIO).
+ */
+static int tf_self_test_integrity(const char *alg_name, struct module *mod)
+{
+	unsigned char expected[32];
+	unsigned char actual[32];
+	struct scatterlist *sg = NULL;
+	struct hash_desc desc = {NULL, 0};
+	size_t digest_length;
+	unsigned char *const key = tf_integrity_hmac_sha256_key;
+	size_t const key_length = sizeof(tf_integrity_hmac_sha256_key);
+	int error;
+
+	if (mod->raw_binary_ptr == NULL)
+		return -ENXIO;
+	if (tf_integrity_hmac_sha256_expected_value == NULL)
+		return -ENOENT;
+	INFO("expected=%s", tf_integrity_hmac_sha256_expected_value);
+	/* NOTE(review): a successful scan_hex may decode fewer than
+	   sizeof(expected) bytes and the returned count is not checked,
+	   leaving the tail of `expected` uninitialized for a short
+	   parameter — confirm the expected parameter format. */
+	error = scan_hex(expected, sizeof(expected),
+			 tf_integrity_hmac_sha256_expected_value);
+	if (error < 0) {
+		pr_err("tf_driver: Badly formatted hmac_sha256 parameter "
+		       "(should be a hex string)\n");
+		return -EIO;
+	};
+
+	desc.tfm = crypto_alloc_hash(alg_name, 0, 0);
+	if (IS_ERR_OR_NULL(desc.tfm)) {
+		ERROR("crypto_alloc_hash(%s) failed", alg_name);
+		/* NOTE(review): ERR_PTR truncated to int; PTR_ERR() would
+		   be the conventional spelling (same below for sg). */
+		error = (desc.tfm == NULL ? -ENOMEM : (int)desc.tfm);
+		goto abort;
+	}
+	digest_length = crypto_hash_digestsize(desc.tfm);
+	INFO("alg_name=%s driver_name=%s digest_length=%u",
+	     alg_name,
+	     crypto_tfm_alg_driver_name(crypto_hash_tfm(desc.tfm)),
+	     digest_length);
+
+	error = crypto_hash_setkey(desc.tfm, key, key_length);
+	if (error) {
+		ERROR("crypto_hash_setkey(%s) failed: %d",
+		      alg_name, error);
+		goto abort;
+	}
+
+	/* Hash the module image in place via a scatterlist over its
+	   vmalloc'ed pages. */
+	sg = vmalloc_to_sg(mod->raw_binary_ptr, mod->raw_binary_size);
+	if (IS_ERR_OR_NULL(sg)) {
+		ERROR("vmalloc_to_sg(%lu) failed: %d",
+		      mod->raw_binary_size, (int)sg);
+		error = (sg == NULL ? -ENOMEM : (int)sg);
+		goto abort;
+	}
+
+	error = crypto_hash_digest(&desc, sg, mod->raw_binary_size, actual);
+	if (error) {
+		ERROR("crypto_hash_digest(%s) failed: %d",
+		      alg_name, error);
+		goto abort;
+	}
+
+	kfree(sg);
+	crypto_free_hash(desc.tfm);
+
+#ifdef CONFIG_TF_DRIVER_FAULT_INJECTION
+	/* Deliberately corrupt the computed MAC to exercise the failure
+	   path when fault injection is requested. */
+	if (tf_fault_injection_mask & TF_CRYPTO_ALG_INTEGRITY) {
+		pr_warning("TF: injecting fault in integrity check!\n");
+		actual[0] = 0xff;
+		actual[1] ^= 0xff;
+	}
+#endif
+	TF_TRACE_ARRAY(expected, digest_length);
+	TF_TRACE_ARRAY(actual, digest_length);
+	if (memcmp(expected, actual, digest_length)) {
+		ERROR("wrong %s digest value", alg_name);
+		error = -EINVAL;
+	} else {
+		INFO("%s: digest successful", alg_name);
+		error = 0;
+	}
+
+	return error;
+
+abort:
+	if (!IS_ERR_OR_NULL(sg))
+		kfree(sg);
+	if (!IS_ERR_OR_NULL(desc.tfm))
+		crypto_free_hash(desc.tfm);
+	/* Map everything except allocation failure to -EIO. */
+	return error == -ENOMEM ? error : -EIO;
+}
+
+#endif /*defined(CONFIG_MODULE_EXTRA_COPY) && defined(MODULE)*/
+
+
+/*** Sysfs entries ***/
+
+/* Sysfs exposure of the POST outcome: a read-only "failures" file under
+   a "post" kobject, reporting the failure bitmask recorded by
+   tf_self_test_post_vectors(). */
+struct tf_post_data {
+	unsigned failures;	/* bitmask of TF_CRYPTO_ALG_* failures */
+	struct kobject kobj;
+};
+
+/* Positional initializer — presumably .name, .owner, .mode per this
+   kernel's struct attribute layout (0444 = world-readable); confirm
+   field order against the running kernel's <linux/sysfs.h>. */
+static const struct attribute tf_post_failures_attr = {
+	"failures",
+	THIS_MODULE,
+	0444
+};
+
+/* sysfs show callback: prints "0" when all tests passed, otherwise the
+   failure bitmask as "0x%08x". */
+static ssize_t tf_post_kobject_show(struct kobject *kobj,
+				    struct attribute *attribute,
+				    char *buf)
+{
+	struct tf_post_data *data =
+		container_of(kobj, struct tf_post_data, kobj);
+	return (data->failures == 0 ?
+		snprintf(buf, PAGE_SIZE, "0\n") :
+		snprintf(buf, PAGE_SIZE, "0x%08x\n", data->failures));
+}
+
+/* Read-only: no .store callback. */
+static const struct sysfs_ops tf_post_sysfs_ops = {
+	.show = tf_post_kobject_show,
+};
+
+static struct kobj_type tf_post_data_ktype = {
+	.sysfs_ops = &tf_post_sysfs_ops,
+};
+
+static struct tf_post_data tf_post_data;
+
+
+
+/*** POST entry point ***/
+
+/*
+ * Run the full power-on self test suite: AES-ECB/CBC known-answer tests,
+ * SHA-1/224/256 and their HMAC variants, plus — for modules built with
+ * CONFIG_MODULE_EXTRA_COPY — the module integrity check.
+ *
+ * Returns a bitmask of TF_CRYPTO_ALG_* flags, one bit per failed test
+ * (0 means everything passed).  The result is also recorded in
+ * tf_post_data.failures for the sysfs "post/failures" file.
+ */
+unsigned tf_self_test_post_vectors(void)
+{
+	unsigned failures = 0;
+
+	dpr_info("[SMC Driver] Starting POST\n");
+#ifdef CONFIG_TF_DRIVER_FAULT_INJECTION
+	dpr_info("%s: fault=0x%08x\n", __func__, tf_fault_injection_mask);
+#endif
+
+	if (tf_self_test_blkcipher("aes-ecb-smc", &aes_ecb_128_test_vector))
+		failures |= TF_CRYPTO_ALG_AES_ECB_128;
+	if (tf_self_test_blkcipher("aes-ecb-smc", &aes_ecb_192_test_vector))
+		failures |= TF_CRYPTO_ALG_AES_ECB_192;
+	if (tf_self_test_blkcipher("aes-ecb-smc", &aes_ecb_256_test_vector))
+		failures |= TF_CRYPTO_ALG_AES_ECB_256;
+	if (tf_self_test_blkcipher("aes-cbc-smc", &aes_cbc_128_test_vector))
+		failures |= TF_CRYPTO_ALG_AES_CBC_128;
+	if (tf_self_test_blkcipher("aes-cbc-smc", &aes_cbc_192_test_vector))
+		failures |= TF_CRYPTO_ALG_AES_CBC_192;
+	if (tf_self_test_blkcipher("aes-cbc-smc", &aes_cbc_256_test_vector))
+		failures |= TF_CRYPTO_ALG_AES_CBC_256;
+
+	if (tf_self_test_digest("sha1-smc", &sha1_test_vector))
+		failures |= TF_CRYPTO_ALG_SHA1;
+	if (tf_self_test_digest("sha224-smc", &sha224_test_vector))
+		failures |= TF_CRYPTO_ALG_SHA224;
+	if (tf_self_test_digest("sha256-smc", &sha256_test_vector))
+		failures |= TF_CRYPTO_ALG_SHA256;
+
+	if (tf_self_test_digest("tf_hmac(sha1-smc)",
+				&hmac_sha1_test_vector))
+		failures |= TF_CRYPTO_ALG_HMAC_SHA1;
+	if (tf_self_test_digest("tf_hmac(sha224-smc)",
+				&hmac_sha224_test_vector))
+		failures |= TF_CRYPTO_ALG_HMAC_SHA224;
+	if (tf_self_test_digest("tf_hmac(sha256-smc)",
+				&hmac_sha256_test_vector))
+		failures |= TF_CRYPTO_ALG_HMAC_SHA256;
+
+#if defined(CONFIG_MODULE_EXTRA_COPY) && defined(MODULE)
+	switch (tf_self_test_integrity("tf_hmac(sha256-smc)", &__this_module)) {
+	case 0:
+		pr_notice("[SMC Driver] integrity check passed\n");
+		break;
+	case -ENXIO:
+		/* No raw binary image: the POST is being re-run after
+		   load.  Fall back to the result recorded at load time.
+		   (FIX: message also gained its missing newline.) */
+		pr_warning("[SMC Driver] integrity check can only be run "
+			   "when the module is loaded\n");
+		if (tf_post_data.failures & TF_CRYPTO_ALG_INTEGRITY) {
+			pr_notice("[SMC Driver] "
+				  "integrity check initially failed\n");
+			failures |= TF_CRYPTO_ALG_INTEGRITY;
+		} else
+			pr_notice("[SMC Driver] "
+				  "integrity check initially passed\n");
+		break;
+	case -ENOENT:
+		/* FIX: these three messages said "[SCM Driver]". */
+		pr_warning("[SMC Driver] "
+			   "integrity check cannot be made\n");
+		pr_notice("[SMC Driver] "
+			  "you must pass the hmac_sha256 parameter\n");
+		/* FALLTHROUGH */
+	default:
+		pr_err("[SMC Driver] error: "
+		       "SMC Driver POST FAILURE "
+		       "(Integrity check HMAC-SHA-256)\n");
+		failures |= TF_CRYPTO_ALG_INTEGRITY;
+		break;
+	}
+#endif
+
+	if (failures) {
+		pr_notice("[SMC Driver] failures in POST (0x%08x)\n",
+			  failures);
+	} else {
+		pr_notice("[SMC Driver] init successful\n");
+	}
+
+	tf_post_data.failures = failures;
+	return failures;
+}
+
+/* Undo tf_self_test_post_init: clear the recorded POST failures and drop
+   the sysfs kobject reference.  Also used as the cleanup path when init
+   itself fails part-way. */
+void tf_self_test_post_exit(void)
+{
+	tf_post_data.failures = 0;
+	kobject_put(&tf_post_data.kobj);
+}
+
+/*
+ * Create the sysfs "post" directory (with its read-only "failures" file)
+ * under @parent when one is given, then run the POST vectors.
+ *
+ * Returns the POST failure bitmask (0 on full success), or a negative
+ * error code if the sysfs setup failed.  @parent may be NULL to skip
+ * the sysfs exposure entirely.
+ */
+int __init tf_self_test_post_init(struct kobject *parent)
+{
+	int error = 0;
+
+	if (parent != NULL) {
+		error = kobject_init_and_add(&tf_post_data.kobj,
+					     &tf_post_data_ktype,
+					     parent,
+					     "post");
+		if (error)
+			goto abort;
+		error = sysfs_create_file(&tf_post_data.kobj,
+					  &tf_post_failures_attr);
+		if (error)
+			goto abort;
+	}
+
+	return tf_self_test_post_vectors();
+
+abort:
+	/* kobject_put is required even when kobject_init_and_add fails. */
+	tf_self_test_post_exit();
+	/* Defensive: never report success from the failure path. */
+	if (error == 0)
+		error = -ENOMEM;
+	return error;
+}
diff --git a/security/smc/tf_teec.c b/security/smc/tf_teec.c
index 6e6d5b2..989ac8a 100644
--- a/security/smc/tf_teec.c
+++ b/security/smc/tf_teec.c
@@ -50,8 +50,7 @@
arbitrary but one-to-one for supported error codes. */
int TEEC_decode_error(TEEC_Result ret)
{
- switch (ret)
- {
+ switch (ret) {
case TEEC_SUCCESS: return 0;
case TEEC_ERROR_GENERIC: return -EIO;
case TEEC_ERROR_ACCESS_DENIED: return -EPERM;
@@ -73,15 +72,15 @@ int TEEC_decode_error(TEEC_Result ret)
default: return -EIO;
}
}
+EXPORT_SYMBOL(TEEC_decode_error);
/* Associate POSIX/Linux errors to TEEC errors. The matching is somewhat
arbitrary, but TEEC_encode_error(TEEC_decode_error(x))==x for supported
error codes. */
TEEC_Result TEEC_encode_error(int err)
{
- if (err >= 0) {
+ if (err >= 0)
return S_SUCCESS;
- }
switch (err) {
case 0: return TEEC_SUCCESS;
case -EIO: return TEEC_ERROR_GENERIC;
@@ -104,21 +103,21 @@ TEEC_Result TEEC_encode_error(int err)
default: return TEEC_ERROR_GENERIC;
}
}
+EXPORT_SYMBOL(TEEC_encode_error);
/* Encode a TEEC time limit into an SChannel time limit. */
static u64 TEEC_encode_timeout(const TEEC_TimeLimit *timeLimit)
{
- if (timeLimit == NULL) {
+ if (timeLimit == NULL)
return (u64)-1;
- } else {
+ else
return *timeLimit;
- }
}
/* Convert a timeout into a time limit in our internal format. */
-void TEEC_GetTimeLimit(TEEC_Context* sContext,
+void TEEC_GetTimeLimit(TEEC_Context *sContext,
uint32_t nTimeout, /*ms from now*/
- TEEC_TimeLimit* sTimeLimit)
+ TEEC_TimeLimit *sTimeLimit)
{
/*Use the kernel time as the TEE time*/
struct timeval now;
@@ -128,6 +127,7 @@ void TEEC_GetTimeLimit(TEEC_Context* sContext,
now.tv_usec / 1000 +
nTimeout);
}
+EXPORT_SYMBOL(TEEC_GetTimeLimit);
#define TF_PARAM_TYPE_INPUT_FLAG 0x1
#define TF_PARAM_TYPE_OUTPUT_FLAG 0x2
@@ -197,9 +197,8 @@ void TEEC_decode_parameters(union tf_answer_param *params,
TEEC_Operation *operation)
{
unsigned i;
- if (operation == NULL) {
+ if (operation == NULL)
return;
- }
for (i = 0; i < 4; i++) {
unsigned ty = TF_GET_PARAM_TYPE(operation->paramTypes, i);
TEEC_Parameter *op = operation->params + i;
@@ -273,6 +272,7 @@ error:
tf_close(connection);
return TEEC_encode_error(error);
}
+EXPORT_SYMBOL(TEEC_InitializeContext);
void TEEC_FinalizeContext(TEEC_Context *context)
{
@@ -281,11 +281,12 @@ void TEEC_FinalizeContext(TEEC_Context *context)
tf_close(connection);
context->imp._connection = NULL;
}
+EXPORT_SYMBOL(TEEC_FinalizeContext);
-TEEC_Result TEEC_RegisterSharedMemory(TEEC_Context* context,
- TEEC_SharedMemory* sharedMem)
+TEEC_Result TEEC_RegisterSharedMemory(TEEC_Context *context,
+ TEEC_SharedMemory *sharedMem)
{
- union tf_command command_message = {{0}};
+ union tf_command command_message = { { 0, } };
struct tf_command_register_shared_memory *cmd =
&command_message.register_shared_memory;
union tf_answer answer_message;
@@ -308,20 +309,20 @@ TEEC_Result TEEC_RegisterSharedMemory(TEEC_Context* context,
tf_register_shared_memory(context->imp._connection,
&command_message,
&answer_message));
- if (ret == TEEC_SUCCESS) {
+ if (ret == TEEC_SUCCESS)
ret = ans->error_code;
- }
if (ret == S_SUCCESS) {
sharedMem->imp._context = context;
sharedMem->imp._block = ans->block;
}
return ret;
}
+EXPORT_SYMBOL(TEEC_RegisterSharedMemory);
-#define TEEC_POINTER_TO_ZERO_SIZED_BUFFER ((void*)0x010)
+#define TEEC_POINTER_TO_ZERO_SIZED_BUFFER ((void *)0x010)
-TEEC_Result TEEC_AllocateSharedMemory(TEEC_Context* context,
- TEEC_SharedMemory* sharedMem)
+TEEC_Result TEEC_AllocateSharedMemory(TEEC_Context *context,
+ TEEC_SharedMemory *sharedMem)
{
TEEC_Result ret;
dprintk(KERN_DEBUG "TEEC_AllocateSharedMemory: requested=%lu",
@@ -333,32 +334,30 @@ TEEC_Result TEEC_AllocateSharedMemory(TEEC_Context* context,
sharedMem->buffer = TEEC_POINTER_TO_ZERO_SIZED_BUFFER;
} else {
sharedMem->buffer = internal_vmalloc(sharedMem->size);
- if (sharedMem->buffer == NULL)
- {
- dprintk(KERN_INFO "TEEC_AllocateSharedMemory: could not allocate %lu bytes",
+ if (sharedMem->buffer == NULL) {
+ dprintk(KERN_INFO "TEEC_AllocateSharedMemory: could "
+ "not allocate %lu bytes",
(unsigned long)sharedMem->size);
return TEEC_ERROR_OUT_OF_MEMORY;
}
}
ret = TEEC_RegisterSharedMemory(context, sharedMem);
- if (ret == TEEC_SUCCESS)
- {
+ if (ret == TEEC_SUCCESS) {
sharedMem->imp._allocated = 1;
- }
- else
- {
+ } else {
internal_vfree(sharedMem->buffer);
sharedMem->buffer = NULL;
memset(&sharedMem->imp, 0, sizeof(sharedMem->imp));
}
return ret;
}
+EXPORT_SYMBOL(TEEC_AllocateSharedMemory);
-void TEEC_ReleaseSharedMemory(TEEC_SharedMemory* sharedMem)
+void TEEC_ReleaseSharedMemory(TEEC_SharedMemory *sharedMem)
{
TEEC_Context *context = sharedMem->imp._context;
- union tf_command command_message = {{0}};
+ union tf_command command_message = { { 0, } };
struct tf_command_release_shared_memory *cmd =
&command_message.release_shared_memory;
union tf_answer answer_message;
@@ -373,28 +372,28 @@ void TEEC_ReleaseSharedMemory(TEEC_SharedMemory* sharedMem)
&command_message,
&answer_message);
if (sharedMem->imp._allocated) {
- if (sharedMem->buffer != TEEC_POINTER_TO_ZERO_SIZED_BUFFER) {
+ if (sharedMem->buffer != TEEC_POINTER_TO_ZERO_SIZED_BUFFER)
internal_vfree(sharedMem->buffer);
- }
sharedMem->buffer = NULL;
sharedMem->size = 0;
}
memset(&sharedMem->imp, 0, sizeof(sharedMem->imp));
}
+EXPORT_SYMBOL(TEEC_ReleaseSharedMemory);
-TEEC_Result TEEC_OpenSessionEx(TEEC_Context* context,
- TEEC_Session* session,
- const TEEC_TimeLimit* timeLimit,
- const TEEC_UUID* destination,
+TEEC_Result TEEC_OpenSessionEx(TEEC_Context *context,
+ TEEC_Session *session,
+ const TEEC_TimeLimit *timeLimit,
+ const TEEC_UUID *destination,
u32 connectionMethod,
- void* connectionData,
- TEEC_Operation* operation,
- u32* errorOrigin)
+ void *connectionData,
+ TEEC_Operation *operation,
+ u32 *errorOrigin)
{
- union tf_command command_message = {{0}};
+ union tf_command command_message = { { 0, } };
struct tf_command_open_client_session *cmd =
&command_message.open_client_session;
- union tf_answer answer_message = {{0}};
+ union tf_answer answer_message = { { 0, } };
struct tf_answer_open_client_session *ans =
&answer_message.open_client_session;
TEEC_Result ret;
@@ -413,8 +412,7 @@ TEEC_Result TEEC_OpenSessionEx(TEEC_Context* context,
cmd->login_type = connectionMethod;
TEEC_encode_parameters(&cmd->param_types, cmd->params, operation);
- switch (connectionMethod)
- {
+ switch (connectionMethod) {
case TEEC_LOGIN_PRIVILEGED:
case TEEC_LOGIN_PUBLIC:
break;
@@ -442,23 +440,23 @@ TEEC_Result TEEC_OpenSessionEx(TEEC_Context* context,
TEEC_ORIGIN_COMMS);
}
- if (ret == TEEC_SUCCESS) {
+ if (ret == TEEC_SUCCESS)
ret = ans->error_code;
- }
if (ret == S_SUCCESS) {
session->imp._client_session = ans->client_session;
session->imp._context = context;
}
return ret;
}
+EXPORT_SYMBOL(TEEC_OpenSessionEx);
-TEEC_Result TEEC_OpenSession(TEEC_Context* context,
- TEEC_Session* session,
- const TEEC_UUID* destination,
+TEEC_Result TEEC_OpenSession(TEEC_Context *context,
+ TEEC_Session *session,
+ const TEEC_UUID *destination,
u32 connectionMethod,
- void* connectionData,
- TEEC_Operation* operation,
- u32* errorOrigin)
+ void *connectionData,
+ TEEC_Operation *operation,
+ u32 *errorOrigin)
{
return TEEC_OpenSessionEx(context, session,
NULL, /*timeLimit*/
@@ -466,12 +464,13 @@ TEEC_Result TEEC_OpenSession(TEEC_Context* context,
connectionMethod, connectionData,
operation, errorOrigin);
}
+EXPORT_SYMBOL(TEEC_OpenSession);
-void TEEC_CloseSession(TEEC_Session* session)
+void TEEC_CloseSession(TEEC_Session *session)
{
if (session != NULL) {
TEEC_Context *context = session->imp._context;
- union tf_command command_message = {{0}};
+ union tf_command command_message = { { 0, } };
struct tf_command_close_client_session *cmd =
&command_message.close_client_session;
union tf_answer answer_message;
@@ -490,18 +489,19 @@ void TEEC_CloseSession(TEEC_Session* session)
session->imp._context = NULL;
}
}
+EXPORT_SYMBOL(TEEC_CloseSession);
-TEEC_Result TEEC_InvokeCommandEx(TEEC_Session* session,
- const TEEC_TimeLimit* timeLimit,
+TEEC_Result TEEC_InvokeCommandEx(TEEC_Session *session,
+ const TEEC_TimeLimit *timeLimit,
u32 commandID,
- TEEC_Operation* operation,
- u32* errorOrigin)
+ TEEC_Operation *operation,
+ u32 *errorOrigin)
{
TEEC_Context *context = session->imp._context;
- union tf_command command_message = {{0}};
+ union tf_command command_message = { { 0, } };
struct tf_command_invoke_client_command *cmd =
&command_message.invoke_client_command;
- union tf_answer answer_message = {{0}};
+ union tf_answer answer_message = { { 0, } };
struct tf_answer_invoke_client_command *ans =
&answer_message.invoke_client_command;
TEEC_Result ret;
@@ -531,31 +531,32 @@ TEEC_Result TEEC_InvokeCommandEx(TEEC_Session* session,
TEEC_ORIGIN_COMMS);
}
- if (ret == TEEC_SUCCESS) {
+ if (ret == TEEC_SUCCESS)
ret = ans->error_code;
- }
return ret;
}
+EXPORT_SYMBOL(TEEC_InvokeCommandEx);
-TEEC_Result TEEC_InvokeCommand(TEEC_Session* session,
+TEEC_Result TEEC_InvokeCommand(TEEC_Session *session,
u32 commandID,
- TEEC_Operation* operation,
- u32* errorOrigin)
+ TEEC_Operation *operation,
+ u32 *errorOrigin)
{
return TEEC_InvokeCommandEx(session,
NULL, /*timeLimit*/
commandID,
operation, errorOrigin);
}
+EXPORT_SYMBOL(TEEC_InvokeCommand);
TEEC_Result TEEC_send_cancellation_message(TEEC_Context *context,
u32 client_session,
u32 cancellation_id)
{
- union tf_command command_message = {{0}};
+ union tf_command command_message = { { 0, } };
struct tf_command_cancel_client_operation *cmd =
&command_message.cancel_client_operation;
- union tf_answer answer_message = {{0}};
+ union tf_answer answer_message = { { 0, } };
struct tf_answer_cancel_client_operation *ans =
&answer_message.cancel_client_operation;
TEEC_Result ret;
@@ -572,13 +573,12 @@ TEEC_Result TEEC_send_cancellation_message(TEEC_Context *context,
&command_message,
&answer_message));
- if (ret == TEEC_SUCCESS) {
+ if (ret == TEEC_SUCCESS)
ret = ans->error_code;
- }
return ret;
}
-void TEEC_RequestCancellation(TEEC_Operation* operation)
+void TEEC_RequestCancellation(TEEC_Operation *operation)
{
TEEC_Result ret;
while (1) {
@@ -609,16 +609,6 @@ void TEEC_RequestCancellation(TEEC_Operation* operation)
}
}
-EXPORT_SYMBOL(TEEC_encode_error);
-EXPORT_SYMBOL(TEEC_decode_error);
-EXPORT_SYMBOL(TEEC_InitializeContext);
-EXPORT_SYMBOL(TEEC_FinalizeContext);
-EXPORT_SYMBOL(TEEC_RegisterSharedMemory);
-EXPORT_SYMBOL(TEEC_AllocateSharedMemory);
-EXPORT_SYMBOL(TEEC_ReleaseSharedMemory);
-EXPORT_SYMBOL(TEEC_OpenSession);
-EXPORT_SYMBOL(TEEC_CloseSession);
-EXPORT_SYMBOL(TEEC_InvokeCommand);
EXPORT_SYMBOL(TEEC_RequestCancellation);
#endif /* defined(CONFIG_TF_TEEC) */
diff --git a/security/smc/tf_util.c b/security/smc/tf_util.c
index ec9941b..66d7c40 100644
--- a/security/smc/tf_util.c
+++ b/security/smc/tf_util.c
@@ -25,6 +25,19 @@
*----------------------------------------------------------------------------*/
#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+/*
+ * Debug helper behind TF_TRACE_ARRAY: print up to 255 bytes of @ptr as a
+ * hex string, appending "..." when the dump is truncated.
+ *
+ * @fun: calling function name (__func__)
+ * @msg: label for the dumped buffer
+ * @ptr: bytes to dump
+ * @len: number of bytes in @ptr
+ */
+void tf_trace_array(const char *fun, const char *msg,
+		    const void *ptr, size_t len)
+{
+	char hex[511];
+	bool ell = (len > sizeof(hex)/2);
+	unsigned lim = (len > sizeof(hex)/2 ? sizeof(hex)/2 : len);
+	unsigned i;
+	/* FIX: hex was printed uninitialized when len == 0. */
+	hex[0] = '\0';
+	for (i = 0; i < lim; i++)
+		sprintf(hex + 2 * i, "%02x", ((unsigned char *)ptr)[i]);
+	/* FIX: len is size_t; %u was undefined behavior, use %zu. */
+	pr_info("%s: %s[%zu] = %s%s\n",
+		fun, msg, len, hex, ell ? "..." : "");
+}
+
void address_cache_property(unsigned long va)
{
unsigned long pa;
@@ -96,27 +109,6 @@ void address_cache_property(unsigned long va)
dprintk(KERN_INFO "Non-secure.\n");
}
-#ifdef CONFIG_BENCH_SECURE_CYCLE
-
-#define LOOP_SIZE (100000)
-
-void run_bogo_mips(void)
-{
- uint32_t cycles;
- void *address = &run_bogo_mips;
-
- dprintk(KERN_INFO "BogoMIPS:\n");
-
- setup_counters();
- cycles = run_code_speed(LOOP_SIZE);
- dprintk(KERN_INFO "%u cycles with code access\n", cycles);
- cycles = run_data_speed(LOOP_SIZE, (unsigned long)address);
- dprintk(KERN_INFO "%u cycles to access %x\n", cycles,
- (unsigned int) address);
-}
-
-#endif /* CONFIG_BENCH_SECURE_CYCLE */
-
/*
* Dump the L1 shared buffer.
*/
@@ -124,7 +116,9 @@ void tf_dump_l1_shared_buffer(struct tf_l1_shared_buffer *buffer)
{
dprintk(KERN_INFO
"buffer@%p:\n"
+ #ifndef CONFIG_TF_ZEBRA
" config_flag_s=%08X\n"
+ #endif
" version_description=%64s\n"
" status_s=%08X\n"
" sync_serial_n=%08X\n"
@@ -138,7 +132,9 @@ void tf_dump_l1_shared_buffer(struct tf_l1_shared_buffer *buffer)
" first_answer=%08X\n"
" first_free_answer=%08X\n\n",
buffer,
+ #ifndef CONFIG_TF_ZEBRA
buffer->config_flag_s,
+ #endif
buffer->version_description,
buffer->status_s,
buffer->sync_serial_n,
@@ -933,86 +929,6 @@ vma_out:
return result;
}
-#ifndef CONFIG_ANDROID
-/* This function hashes the path of the current application.
- * If data = NULL ,nothing else is added to the hash
- else add data to the hash
- */
-int tf_hash_application_path_and_data(char *buffer, void *data,
- u32 data_len)
-{
- int result = -ENOENT;
- char *buffer = NULL;
- struct mm_struct *mm;
- struct vm_area_struct *vma;
-
- buffer = internal_kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (buffer == NULL) {
- result = -ENOMEM;
- goto end;
- }
-
- mm = current->mm;
-
- down_read(&(mm->mmap_sem));
- for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
- if ((vma->vm_flags & VM_EXECUTABLE) != 0
- && vma->vm_file != NULL) {
- struct path *path;
- char *endpath;
- size_t pathlen;
- struct sha1_ctx sha1;
- u8 hash[SHA1_DIGEST_SIZE];
-
- path = &vma->vm_file->f_path;
-
- endpath = d_path(path, buffer, PAGE_SIZE);
- if (IS_ERR(path)) {
- result = PTR_ERR(endpath);
- up_read(&(mm->mmap_sem));
- goto end;
- }
- pathlen = (buffer + PAGE_SIZE) - endpath;
-
-#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
- {
- char *c;
- dprintk(KERN_DEBUG "current process path = ");
- for (c = endpath;
- c < buffer + PAGE_SIZE;
- c++)
- dprintk("%c", *c);
-
- dprintk(", uid=%d, euid=%d\n", current_uid(),
- current_euid());
- }
-#endif /* defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */
-
- sha1_init(&sha1);
- sha1_update(&sha1, endpath, pathlen);
- if (data != NULL) {
- dprintk(KERN_INFO "current process path: "
- "Hashing additional data\n");
- sha1_update(&sha1, data, data_len);
- }
- sha1_final(&sha1, hash);
- memcpy(buffer, hash, sizeof(hash));
-
- result = 0;
-
- break;
- }
- }
- up_read(&(mm->mmap_sem));
-
-end:
- if (buffer != NULL)
- internal_kfree(buffer);
-
- return result;
-}
-#endif /* !CONFIG_ANDROID */
-
void *internal_kmalloc(size_t size, int priority)
{
void *ptr;
diff --git a/security/smc/tf_util.h b/security/smc/tf_util.h
index 43a05da..f556d64 100644
--- a/security/smc/tf_util.h
+++ b/security/smc/tf_util.h
@@ -39,10 +39,29 @@
*----------------------------------------------------------------------------*/
#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+extern unsigned tf_debug_level;
void address_cache_property(unsigned long va);
-#define dprintk printk
+#define dprintk(args...) ((void)(tf_debug_level >= 6 ? printk(args) : 0))
+#define dpr_info(args...) ((void)(tf_debug_level >= 3 ? pr_info(args) : 0))
+#define dpr_err(args...) ((void)(tf_debug_level >= 1 ? pr_err(args) : 0))
+#define INFO(fmt, args...) \
+ (void)dprintk(KERN_INFO "%s: " fmt "\n", __func__, ## args)
+#define WARNING(fmt, args...) \
+ (tf_debug_level >= 3 ? \
+ printk(KERN_WARNING "%s: " fmt "\n", __func__, ## args) : \
+ (void)0)
+#define ERROR(fmt, args...) \
+ (tf_debug_level >= 1 ? \
+ printk(KERN_ERR "%s: " fmt "\n", __func__, ## args) : \
+ (void)0)
+void tf_trace_array(const char *fun, const char *msg,
+ const void *ptr, size_t len);
+#define TF_TRACE_ARRAY(ptr, len) \
+ (tf_debug_level >= 7 ? \
+ tf_trace_array(__func__, #ptr "/" #len, ptr, len) : \
+ 0)
void tf_dump_l1_shared_buffer(struct tf_l1_shared_buffer *buffer);
@@ -50,16 +69,15 @@ void tf_dump_command(union tf_command *command);
void tf_dump_answer(union tf_answer *answer);
-#ifdef CONFIG_BENCH_SECURE_CYCLE
-void setup_counters(void);
-void run_bogo_mips(void);
-int run_code_speed(unsigned int loop);
-int run_data_speed(unsigned int loop, unsigned long va);
-#endif /* CONFIG_BENCH_SECURE_CYCLE */
-
#else /* defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */
#define dprintk(args...) do { ; } while (0)
+#define dpr_info(args...) do { ; } while (0)
+#define dpr_err(args...) do { ; } while (0)
+#define INFO(fmt, args...) ((void)0)
+#define WARNING(fmt, args...) ((void)0)
+#define ERROR(fmt, args...) ((void)0)
+#define TF_TRACE_ARRAY(ptr, len) ((void)(ptr), (void)(len))
#define tf_dump_l1_shared_buffer(buffer) ((void) 0)
#define tf_dump_command(command) ((void) 0)
#define tf_dump_answer(answer) ((void) 0)
@@ -74,10 +92,6 @@ int run_data_speed(unsigned int loop, unsigned long va);
int tf_get_current_process_hash(void *hash);
-#ifndef CONFIG_ANDROID
-int tf_hash_application_path_and_data(char *buffer, void *data, u32 data_len);
-#endif /* !CONFIG_ANDROID */
-
/*----------------------------------------------------------------------------
* Statistic computation
*----------------------------------------------------------------------------*/
diff --git a/security/smc/tf_zebra.h b/security/smc/tf_zebra.h
index b30fe6f..23370b7 100644
--- a/security/smc/tf_zebra.h
+++ b/security/smc/tf_zebra.h
@@ -27,18 +27,21 @@ int tf_ctrl_device_register(void);
int tf_start(struct tf_comm *comm,
u32 workspace_addr, u32 workspace_size,
u8 *pa_buffer, u32 pa_size,
- u8 *properties_buffer, u32 properties_length);
+ u32 conf_descriptor, u32 conf_offset, u32 conf_size);
/* Assembler entry points to/from secure */
u32 schedule_secure_world(u32 app_id, u32 proc_id, u32 flags, u32 args);
u32 rpc_handler(u32 p1, u32 p2, u32 p3, u32 p4);
-u32 read_mpidr(void);
-/* L4 SEC clockdomain enabling/disabling */
-void tf_l4sec_clkdm_wakeup(bool wakelock);
-void tf_l4sec_clkdm_allow_idle(bool wakeunlock);
+void tf_clock_timer_init(void);
+void tf_clock_timer_start(void);
+void tf_clock_timer_stop(void);
+u32 tf_try_disabling_secure_hwa_clocks(u32 mask);
-/* Delayed secure resume */
-int tf_delayed_secure_resume(void);
+#ifdef MODULE
+extern int __initdata (*tf_comm_early_init)(void);
+int __init tf_device_mshield_init(char *str);
+void __exit tf_device_mshield_exit(void);
+#endif
#endif /* __TF_ZEBRA_H__ */