about summary refs log tree commit diff stats
path: root/security/smc/tf_comm_mshield.c
diff options
context:
space:
mode:
Diffstat (limited to 'security/smc/tf_comm_mshield.c')
-rw-r--r--  security/smc/tf_comm_mshield.c  632
1 files changed, 261 insertions, 371 deletions
diff --git a/security/smc/tf_comm_mshield.c b/security/smc/tf_comm_mshield.c
index c36473e..aec5b80 100644
--- a/security/smc/tf_comm_mshield.c
+++ b/security/smc/tf_comm_mshield.c
@@ -1,5 +1,5 @@
/**
- * Copyright (c) 2010 Trusted Logic S.A.
+ * Copyright (c) 2011 Trusted Logic S.A.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
@@ -33,12 +33,14 @@
#include <asm/cacheflush.h>
+#include "s_version.h"
#include "tf_defs.h"
#include "tf_comm.h"
#include "tf_util.h"
#include "tf_conn.h"
#include "tf_zebra.h"
#include "tf_crypto.h"
+#include "mach/omap4-common.h"
/*--------------------------------------------------------------------------
* Internal constants
@@ -67,7 +69,6 @@
#define RPC_ADVANCEMENT_FINISHED 2
u32 g_RPC_advancement;
-u32 g_RPC_parameters[4] = {0, 0, 0, 0};
u32 g_secure_task_id;
u32 g_service_end;
@@ -80,6 +81,8 @@ u32 g_service_end;
#define API_HAL_TASK_MGR_RPCINIT_INDEX 0x08
#define API_HAL_KM_GETSECUREROMCODECRC_INDEX 0x0B
#define API_HAL_SEC_L3_RAM_RESIZE_INDEX 0x17
+#define API_HAL_HWATURNOFF_INDEX 0x29
+#define API_HAL_ACTIVATEHWAPWRMGRPATCH_INDEX 0x2A
#define API_HAL_RET_VALUE_OK 0x0
@@ -105,37 +108,18 @@ struct tf_ns_pa_info {
void *results;
};
-/*
- * AFY: I would like to remove the L0 buffer altogether:
- * - you can use the L1 shared buffer to pass the RPC parameters and results:
- * I think these easily fit in 256 bytes and you can use the area at
- * offset 0x2C0-0x3BF in the L1 shared buffer
- */
-struct tf_init_buffer {
- u32 init_status;
- u32 protocol_version;
- u32 l1_shared_buffer_descr;
- u32 backing_store_addr;
- u32 backext_storage_addr;
- u32 workspace_addr;
- u32 workspace_size;
- u32 properties_length;
- u8 properties_buffer[1];
-};
#ifdef CONFIG_HAS_WAKELOCK
static struct wake_lock g_tf_wake_lock;
-static u32 tf_wake_lock_count = 0;
#endif
static struct clockdomain *smc_l4_sec_clkdm;
-static u32 smc_l4_sec_clkdm_use_count = 0;
static int __init tf_early_init(void)
{
g_secure_task_id = 0;
- dprintk(KERN_INFO "SMC early init\n");
+ dpr_info("SMC early init\n");
smc_l4_sec_clkdm = clkdm_lookup("l4_secure_clkdm");
if (smc_l4_sec_clkdm == NULL)
@@ -148,7 +132,120 @@ static int __init tf_early_init(void)
return 0;
}
+#ifdef MODULE
+int __initdata (*tf_comm_early_init)(void) = &tf_early_init;
+#else
early_initcall(tf_early_init);
+#endif
+
+/*
+ * The timeout timer used to power off clocks
+ */
+#define INACTIVITY_TIMER_TIMEOUT 10 /* ms */
+
+static DEFINE_SPINLOCK(clk_timer_lock);
+static struct timer_list tf_crypto_clock_timer;
+static int tf_crypto_clock_enabled;
+
+void tf_clock_timer_init(void)
+{
+ init_timer(&tf_crypto_clock_timer);
+ tf_crypto_clock_enabled = 0;
+
+ /* HWA Clocks Patch init */
+ omap4_secure_dispatcher(API_HAL_ACTIVATEHWAPWRMGRPATCH_INDEX,
+ 0, 0, 0, 0, 0, 0);
+}
+
+u32 tf_try_disabling_secure_hwa_clocks(u32 mask)
+{
+ return omap4_secure_dispatcher(API_HAL_HWATURNOFF_INDEX,
+ FLAG_START_HAL_CRITICAL, 1, mask, 0, 0, 0);
+}
+
+static void tf_clock_timer_cb(unsigned long data)
+{
+ unsigned long flags;
+ u32 ret = 0;
+
+ dprintk(KERN_INFO "%s called...\n", __func__);
+
+ spin_lock_irqsave(&clk_timer_lock, flags);
+
+ /*
+ * If one of the HWA is used (by secure or public) the timer
+ * function cuts all the HWA clocks
+ */
+ if (tf_crypto_clock_enabled) {
+ dprintk(KERN_INFO "%s; tf_crypto_clock_enabled = %d\n",
+ __func__, tf_crypto_clock_enabled);
+ goto restart;
+ }
+
+ ret = tf_crypto_turn_off_clocks();
+
+ /*
+ * From MShield-DK 1.3.3 sources:
+ *
+ * Digest: 1 << 0
+ * DES : 1 << 1
+ * AES1 : 1 << 2
+ * AES2 : 1 << 3
+ */
+ if (ret & 0xf)
+ goto restart;
+
+ wake_unlock(&g_tf_wake_lock);
+ clkdm_allow_idle(smc_l4_sec_clkdm);
+
+ spin_unlock_irqrestore(&clk_timer_lock, flags);
+
+ dprintk(KERN_INFO "%s success\n", __func__);
+ return;
+
+restart:
+ dprintk("%s: will wait one more time ret=0x%x\n", __func__, ret);
+ mod_timer(&tf_crypto_clock_timer,
+ jiffies + msecs_to_jiffies(INACTIVITY_TIMER_TIMEOUT));
+
+ spin_unlock_irqrestore(&clk_timer_lock, flags);
+}
+
+void tf_clock_timer_start(void)
+{
+ unsigned long flags;
+ dprintk(KERN_INFO "%s\n", __func__);
+
+ spin_lock_irqsave(&clk_timer_lock, flags);
+
+ tf_crypto_clock_enabled++;
+
+ wake_lock(&g_tf_wake_lock);
+ clkdm_wakeup(smc_l4_sec_clkdm);
+
+ /* Stop the timer if already running */
+ if (timer_pending(&tf_crypto_clock_timer))
+ del_timer(&tf_crypto_clock_timer);
+
+ /* Configure the timer */
+ tf_crypto_clock_timer.expires =
+ jiffies + msecs_to_jiffies(INACTIVITY_TIMER_TIMEOUT);
+ tf_crypto_clock_timer.function = tf_clock_timer_cb;
+
+ add_timer(&tf_crypto_clock_timer);
+
+ spin_unlock_irqrestore(&clk_timer_lock, flags);
+}
+
+void tf_clock_timer_stop(void)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&clk_timer_lock, flags);
+ tf_crypto_clock_enabled--;
+ spin_unlock_irqrestore(&clk_timer_lock, flags);
+
+ dprintk(KERN_INFO "%s\n", __func__);
+}
/*
* Function responsible for formatting parameters to pass from NS world to
@@ -161,14 +258,12 @@ u32 omap4_secure_dispatcher(u32 app_id, u32 flags, u32 nargs,
unsigned long iflags;
u32 pub2sec_args[5] = {0, 0, 0, 0, 0};
- /*dprintk(KERN_INFO "omap4_secure_dispatcher: "
- "app_id=0x%08x, flags=0x%08x, nargs=%u\n",
- app_id, flags, nargs);*/
+ /*dpr_info("%s: app_id=0x%08x, flags=0x%08x, nargs=%u\n",
+ __func__, app_id, flags, nargs);*/
/*if (nargs != 0)
- dprintk(KERN_INFO
- "omap4_secure_dispatcher: args=%08x, %08x, %08x, %08x\n",
- arg1, arg2, arg3, arg4);*/
+ dpr_info("%s: args=%08x, %08x, %08x, %08x\n",
+ __func__, arg1, arg2, arg3, arg4);*/
pub2sec_args[0] = nargs;
pub2sec_args[1] = arg1;
@@ -186,26 +281,28 @@ u32 omap4_secure_dispatcher(u32 app_id, u32 flags, u32 nargs,
/*
* Put L4 Secure clock domain to SW_WKUP so that modules are accessible
*/
- tf_l4sec_clkdm_wakeup(false);
+ clkdm_wakeup(smc_l4_sec_clkdm);
local_irq_save(iflags);
-#ifdef DEBUG
- BUG_ON((read_mpidr() & 0x00000003) != 0);
-#endif
+
/* proc_id is always 0 */
ret = schedule_secure_world(app_id, 0, flags, __pa(pub2sec_args));
local_irq_restore(iflags);
/* Restore the HW_SUP on L4 Sec clock domain so hardware can idle */
- tf_l4sec_clkdm_allow_idle(false);
+ if ((app_id != API_HAL_HWATURNOFF_INDEX) &&
+ (!timer_pending(&tf_crypto_clock_timer))) {
+ (void) tf_crypto_turn_off_clocks();
+ clkdm_allow_idle(smc_l4_sec_clkdm);
+ }
- /*dprintk(KERN_INFO "omap4_secure_dispatcher()\n");*/
+ /*dpr_info("%s()\n", __func__);*/
return ret;
}
/* Yields the Secure World */
-int tf_schedule_secure_world(struct tf_comm *comm, bool prepare_exit)
+int tf_schedule_secure_world(struct tf_comm *comm)
{
int status = 0;
int ret;
@@ -220,41 +317,38 @@ int tf_schedule_secure_world(struct tf_comm *comm, bool prepare_exit)
case RPC_ADVANCEMENT_NONE:
/* Return from IRQ */
appli_id = SMICODEPUB_IRQ_END;
- if (prepare_exit)
- status = STATUS_PENDING;
break;
case RPC_ADVANCEMENT_PENDING:
/* nothing to do in this case */
goto exit;
default:
case RPC_ADVANCEMENT_FINISHED:
- if (prepare_exit)
- goto exit;
appli_id = SMICODEPUB_RPC_END;
g_RPC_advancement = RPC_ADVANCEMENT_NONE;
break;
}
+ tf_clock_timer_start();
+
g_service_end = 1;
/* yield to the Secure World */
ret = omap4_secure_dispatcher(appli_id, /* app_id */
0, 0, /* flags, nargs */
0, 0, 0, 0); /* arg1, arg2, arg3, arg4 */
if (g_service_end != 0) {
- dprintk(KERN_ERR "Service End ret=%X\n", ret);
+ dpr_err("Service End ret=%X\n", ret);
if (ret == 0) {
- dmac_flush_range((void *)comm->init_shared_buffer,
- (void *)(((u32)(comm->init_shared_buffer)) +
+ dmac_flush_range((void *)comm->l1_buffer,
+ (void *)(((u32)(comm->l1_buffer)) +
PAGE_SIZE));
- outer_inv_range(__pa(comm->init_shared_buffer),
- __pa(comm->init_shared_buffer) +
+ outer_inv_range(__pa(comm->l1_buffer),
+ __pa(comm->l1_buffer) +
PAGE_SIZE);
- ret = ((struct tf_init_buffer *)
- (comm->init_shared_buffer))->init_status;
+ ret = comm->l1_buffer->exit_code;
- dprintk(KERN_ERR "SMC PA failure ret=%X\n", ret);
+ dpr_err("SMC PA failure ret=%X\n", ret);
if (ret == 0)
ret = -EFAULT;
}
@@ -264,6 +358,8 @@ int tf_schedule_secure_world(struct tf_comm *comm, bool prepare_exit)
status = ret;
}
+ tf_clock_timer_stop();
+
exit:
local_irq_restore(iflags);
@@ -278,17 +374,17 @@ static int tf_se_init(struct tf_comm *comm,
unsigned int crc;
if (comm->se_initialized) {
- dprintk(KERN_INFO "tf_se_init: SE already initialized... "
- "nothing to do\n");
+ dpr_info("%s: SE already initialized... nothing to do\n",
+ __func__);
return 0;
}
/* Secure CRC read */
- dprintk(KERN_INFO "tf_se_init: Secure CRC Read...\n");
+ dpr_info("%s: Secure CRC Read...\n", __func__);
crc = omap4_secure_dispatcher(API_HAL_KM_GETSECUREROMCODECRC_INDEX,
0, 0, 0, 0, 0, 0);
- printk(KERN_INFO "SMC: SecureCRC=0x%08X\n", crc);
+ pr_info("SMC: SecureCRC=0x%08X\n", crc);
/*
* Flush caches before resize, just to be sure there is no
@@ -304,46 +400,45 @@ static int tf_se_init(struct tf_comm *comm,
wmb();
/* SRAM resize */
- dprintk(KERN_INFO "tf_se_init: SRAM resize (52KB)...\n");
+ dpr_info("%s: SRAM resize (52KB)...\n", __func__);
error = omap4_secure_dispatcher(API_HAL_SEC_L3_RAM_RESIZE_INDEX,
FLAG_FIQ_ENABLE | FLAG_START_HAL_CRITICAL, 1,
SEC_RAM_SIZE_52KB, 0, 0, 0);
if (error == API_HAL_RET_VALUE_OK) {
- dprintk(KERN_INFO "tf_se_init: SRAM resize OK\n");
+ dpr_info("%s: SRAM resize OK\n", __func__);
} else {
- dprintk(KERN_ERR "tf_se_init: "
- "SRAM resize failed [0x%x]\n", error);
+ dpr_err("%s: SRAM resize failed [0x%x]\n", __func__, error);
goto error;
}
/* SDP init */
- dprintk(KERN_INFO "tf_se_init: SDP runtime init..."
+ dpr_info("%s: SDP runtime init..."
"(sdp_backing_store_addr=%x, sdp_bkext_store_addr=%x)\n",
+ __func__,
sdp_backing_store_addr, sdp_bkext_store_addr);
error = omap4_secure_dispatcher(API_HAL_SDP_RUNTIMEINIT_INDEX,
FLAG_FIQ_ENABLE | FLAG_START_HAL_CRITICAL, 2,
sdp_backing_store_addr, sdp_bkext_store_addr, 0, 0);
if (error == API_HAL_RET_VALUE_OK) {
- dprintk(KERN_INFO "tf_se_init: SDP runtime init OK\n");
+ dpr_info("%s: SDP runtime init OK\n", __func__);
} else {
- dprintk(KERN_ERR "tf_se_init: "
- "SDP runtime init failed [0x%x]\n", error);
+ dpr_err("%s: SDP runtime init failed [0x%x]\n",
+ __func__, error);
goto error;
}
/* RPC init */
- dprintk(KERN_INFO "tf_se_init: RPC init...\n");
+ dpr_info("%s: RPC init...\n", __func__);
error = omap4_secure_dispatcher(API_HAL_TASK_MGR_RPCINIT_INDEX,
FLAG_START_HAL_CRITICAL, 1,
(u32) (u32(*const) (u32, u32, u32, u32)) &rpc_handler, 0, 0, 0);
if (error == API_HAL_RET_VALUE_OK) {
- dprintk(KERN_INFO "tf_se_init: RPC init OK\n");
+ dpr_info("%s: RPC init OK\n", __func__);
} else {
- dprintk(KERN_ERR "tf_se_init: "
- "RPC init failed [0x%x]\n", error);
+ dpr_err("%s: RPC init failed [0x%x]\n", __func__, error);
goto error;
}
@@ -361,21 +456,20 @@ static u32 tf_rpc_init(struct tf_comm *comm)
u32 protocol_version;
u32 rpc_error = RPC_SUCCESS;
- dprintk(KERN_INFO "tf_rpc_init(%p)\n", comm);
+ dpr_info("%s(%p)\n", __func__, comm);
spin_lock(&(comm->lock));
- dmac_flush_range((void *)comm->init_shared_buffer,
- (void *)(((u32)(comm->init_shared_buffer)) + PAGE_SIZE));
- outer_inv_range(__pa(comm->init_shared_buffer),
- __pa(comm->init_shared_buffer) + PAGE_SIZE);
+ dmac_flush_range((void *)comm->l1_buffer,
+ (void *)(((u32)(comm->l1_buffer)) + PAGE_SIZE));
+ outer_inv_range(__pa(comm->l1_buffer),
+ __pa(comm->l1_buffer) + PAGE_SIZE);
- protocol_version = ((struct tf_init_buffer *)
- (comm->init_shared_buffer))->protocol_version;
+ protocol_version = comm->l1_buffer->protocol_version;
if ((GET_PROTOCOL_MAJOR_VERSION(protocol_version))
!= TF_S_PROTOCOL_MAJOR_VERSION) {
- dprintk(KERN_ERR "SMC: Unsupported SMC Protocol PA Major "
+ dpr_err("SMC: Unsupported SMC Protocol PA Major "
"Version (0x%02x, expected 0x%02x)!\n",
GET_PROTOCOL_MAJOR_VERSION(protocol_version),
TF_S_PROTOCOL_MAJOR_VERSION);
@@ -386,20 +480,16 @@ static u32 tf_rpc_init(struct tf_comm *comm)
spin_unlock(&(comm->lock));
- register_smc_public_crypto_digest();
- register_smc_public_crypto_aes();
-
return rpc_error;
}
static u32 tf_rpc_trace(struct tf_comm *comm)
{
- dprintk(KERN_INFO "tf_rpc_trace(%p)\n", comm);
+ dpr_info("%s(%p)\n", __func__, comm);
#ifdef CONFIG_SECURE_TRACE
spin_lock(&(comm->lock));
- printk(KERN_INFO "SMC PA: %s",
- comm->pBuffer->rpc_trace_buffer);
+ pr_info("SMC PA: %s", comm->l1_buffer->rpc_trace_buffer);
spin_unlock(&(comm->lock));
#endif
return RPC_SUCCESS;
@@ -419,40 +509,39 @@ int tf_rpc_execute(struct tf_comm *comm)
u32 rpc_command;
u32 rpc_error = RPC_NO;
-#ifdef DEBUG
- BUG_ON((read_mpidr() & 0x00000003) != 0);
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+ BUG_ON((hard_smp_processor_id() & 0x00000003) != 0);
#endif
/* Lock the RPC */
mutex_lock(&(comm->rpc_mutex));
- rpc_command = g_RPC_parameters[1];
+ rpc_command = comm->l1_buffer->rpc_command;
if (g_RPC_advancement == RPC_ADVANCEMENT_PENDING) {
- dprintk(KERN_INFO "tf_rpc_execute: "
- "Executing CMD=0x%x\n",
- g_RPC_parameters[1]);
+ dpr_info("%s: Executing CMD=0x%x\n",
+ __func__, rpc_command);
switch (rpc_command) {
case RPC_CMD_YIELD:
- dprintk(KERN_INFO "tf_rpc_execute: "
- "RPC_CMD_YIELD\n");
+ dpr_info("%s: RPC_CMD_YIELD\n", __func__);
rpc_error = RPC_YIELD;
- g_RPC_parameters[0] = RPC_SUCCESS;
+ comm->l1_buffer->rpc_status = RPC_SUCCESS;
break;
case RPC_CMD_TRACE:
rpc_error = RPC_NON_YIELD;
- g_RPC_parameters[0] = tf_rpc_trace(comm);
+ comm->l1_buffer->rpc_status = tf_rpc_trace(comm);
break;
default:
if (tf_crypto_execute_rpc(rpc_command,
- comm->pBuffer->rpc_cus_buffer) != 0)
- g_RPC_parameters[0] = RPC_ERROR_BAD_PARAMETERS;
+ comm->l1_buffer->rpc_cus_buffer) != 0)
+ comm->l1_buffer->rpc_status =
+ RPC_ERROR_BAD_PARAMETERS;
else
- g_RPC_parameters[0] = RPC_SUCCESS;
+ comm->l1_buffer->rpc_status = RPC_SUCCESS;
rpc_error = RPC_NON_YIELD;
break;
}
@@ -461,50 +550,12 @@ int tf_rpc_execute(struct tf_comm *comm)
mutex_unlock(&(comm->rpc_mutex));
- dprintk(KERN_INFO "tf_rpc_execute: Return 0x%x\n",
- rpc_error);
+ dpr_info("%s: Return 0x%x\n", __func__, rpc_error);
return rpc_error;
}
/*--------------------------------------------------------------------------
- * L4 SEC Clock domain handling
- *-------------------------------------------------------------------------- */
-
-static DEFINE_SPINLOCK(clk_lock);
-void tf_l4sec_clkdm_wakeup(bool wakelock)
-{
- unsigned long flags;
- spin_lock_irqsave(&clk_lock, flags);
-#ifdef CONFIG_HAS_WAKELOCK
- if (wakelock) {
- tf_wake_lock_count++;
- wake_lock(&g_tf_wake_lock);
- }
-#endif
- smc_l4_sec_clkdm_use_count++;
- clkdm_wakeup(smc_l4_sec_clkdm);
- spin_unlock_irqrestore(&clk_lock, flags);
-}
-
-void tf_l4sec_clkdm_allow_idle(bool wakeunlock)
-{
- unsigned long flags;
- spin_lock_irqsave(&clk_lock, flags);
- smc_l4_sec_clkdm_use_count--;
- if (smc_l4_sec_clkdm_use_count == 0)
- clkdm_allow_idle(smc_l4_sec_clkdm);
-#ifdef CONFIG_HAS_WAKELOCK
- if (wakeunlock){
- tf_wake_lock_count--;
- if (tf_wake_lock_count == 0)
- wake_unlock(&g_tf_wake_lock);
- }
-#endif
- spin_unlock_irqrestore(&clk_lock, flags);
-}
-
-/*--------------------------------------------------------------------------
* Power management
*-------------------------------------------------------------------------- */
/*
@@ -520,7 +571,7 @@ int tf_pm_shutdown(struct tf_comm *comm)
union tf_command command;
union tf_answer answer;
- dprintk(KERN_INFO "tf_pm_shutdown()\n");
+ dpr_info("%s()\n", __func__);
memset(&command, 0, sizeof(command));
@@ -539,17 +590,16 @@ int tf_pm_shutdown(struct tf_comm *comm)
false);
if (error != 0) {
- dprintk(KERN_ERR "tf_pm_shutdown(): "
- "tf_send_receive failed (error %d)!\n",
- error);
+ dpr_err("%s(): tf_send_receive failed (error %d)!\n",
+ __func__, error);
return error;
}
#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
if (answer.header.error_code != 0)
- dprintk(KERN_ERR "tf_driver: shutdown failed.\n");
+ dpr_err("tf_driver: shutdown failed.\n");
else
- dprintk(KERN_INFO "tf_driver: shutdown succeeded.\n");
+ dpr_info("tf_driver: shutdown succeeded.\n");
#endif
return answer.header.error_code;
@@ -560,7 +610,7 @@ int tf_pm_hibernate(struct tf_comm *comm)
{
struct tf_device *dev = tf_get_device();
- dprintk(KERN_INFO "tf_pm_hibernate()\n");
+ dpr_info("%s()\n", __func__);
/*
* As we enter in CORE OFF, the keys are going to be cleared.
@@ -576,101 +626,11 @@ int tf_pm_hibernate(struct tf_comm *comm)
return 0;
}
-#ifdef CONFIG_SMC_KERNEL_CRYPTO
-#define DELAYED_RESUME_NONE 0
-#define DELAYED_RESUME_PENDING 1
-#define DELAYED_RESUME_ONGOING 2
-
-static DEFINE_SPINLOCK(tf_delayed_resume_lock);
-static int tf_need_delayed_resume = DELAYED_RESUME_NONE;
-
-int tf_delayed_secure_resume(void)
-{
- int ret;
- union tf_command message;
- union tf_answer answer;
- struct tf_device *dev = tf_get_device();
-
- spin_lock(&tf_delayed_resume_lock);
- if (likely(tf_need_delayed_resume == DELAYED_RESUME_NONE)) {
- spin_unlock(&tf_delayed_resume_lock);
- return 0;
- }
-
- if (unlikely(tf_need_delayed_resume == DELAYED_RESUME_ONGOING)) {
- spin_unlock(&tf_delayed_resume_lock);
-
- /*
- * Wait for the other caller to actually finish the delayed
- * resume operation
- */
- while (tf_need_delayed_resume != DELAYED_RESUME_NONE)
- cpu_relax();
-
- return 0;
- }
-
- tf_need_delayed_resume = DELAYED_RESUME_ONGOING;
- spin_unlock(&tf_delayed_resume_lock);
-
- /*
- * When the system leaves CORE OFF, HWA are configured as secure. We
- * need them as public for the Linux Crypto API.
- */
- memset(&message, 0, sizeof(message));
-
- message.header.message_type = TF_MESSAGE_TYPE_MANAGEMENT;
- message.header.message_size =
- (sizeof(struct tf_command_management) -
- sizeof(struct tf_command_header))/sizeof(u32);
- message.management.command =
- TF_MANAGEMENT_RESUME_FROM_CORE_OFF;
-
- ret = tf_send_receive(&dev->sm, &message, &answer, NULL, false);
- if (ret) {
- printk(KERN_ERR "tf_pm_resume(%p): "
- "tf_send_receive failed (error %d)!\n",
- &dev->sm, ret);
-
- unregister_smc_public_crypto_digest();
- unregister_smc_public_crypto_aes();
- return ret;
- }
-
- if (answer.header.error_code) {
- unregister_smc_public_crypto_digest();
- unregister_smc_public_crypto_aes();
- }
-
- spin_lock(&tf_delayed_resume_lock);
- tf_need_delayed_resume = DELAYED_RESUME_NONE;
- spin_unlock(&tf_delayed_resume_lock);
-
- return answer.header.error_code;
-}
-#endif
-
int tf_pm_resume(struct tf_comm *comm)
{
- dprintk(KERN_INFO "tf_pm_resume()\n");
- #if 0
- {
- void *workspace_va;
- struct tf_device *dev = tf_get_device();
- workspace_va = ioremap(dev->workspace_addr,
- dev->workspace_size);
- printk(KERN_INFO
- "Read first word of workspace [0x%x]\n",
- *(uint32_t *)workspace_va);
- }
- #endif
-
-#ifdef CONFIG_SMC_KERNEL_CRYPTO
- spin_lock(&tf_delayed_resume_lock);
- tf_need_delayed_resume = DELAYED_RESUME_PENDING;
- spin_unlock(&tf_delayed_resume_lock);
-#endif
+ dpr_info("%s()\n", __func__);
+ tf_aes_pm_resume();
return 0;
}
@@ -682,8 +642,7 @@ int tf_init(struct tf_comm *comm)
{
spin_lock_init(&(comm->lock));
comm->flags = 0;
- comm->pBuffer = NULL;
- comm->init_shared_buffer = NULL;
+ comm->l1_buffer = NULL;
comm->se_initialized = false;
@@ -693,10 +652,10 @@ int tf_init(struct tf_comm *comm)
if (tf_crypto_init() != PUBLIC_CRYPTO_OPERATION_SUCCESS)
return -EFAULT;
- if (omap_type() == OMAP2_DEVICE_TYPE_GP) {
- register_smc_public_crypto_digest();
- register_smc_public_crypto_aes();
- }
+ pr_info("%s\n", S_VERSION_STRING);
+
+ register_smc_public_crypto_digest();
+ register_smc_public_crypto_aes();
return 0;
}
@@ -705,11 +664,9 @@ int tf_init(struct tf_comm *comm)
int tf_start(struct tf_comm *comm,
u32 workspace_addr, u32 workspace_size,
u8 *pa_buffer, u32 pa_size,
- u8 *properties_buffer, u32 properties_length)
+ u32 conf_descriptor, u32 conf_offset, u32 conf_size)
{
- struct tf_init_buffer *init_shared_buffer = NULL;
struct tf_l1_shared_buffer *l1_shared_buffer = NULL;
- u32 l1_shared_buffer_descr;
struct tf_ns_pa_info pa_info;
int ret;
u32 descr;
@@ -725,37 +682,26 @@ int tf_start(struct tf_comm *comm,
sched_getaffinity(0, &saved_cpu_mask);
ret_affinity = sched_setaffinity(0, &local_cpu_mask);
if (ret_affinity != 0)
- dprintk(KERN_ERR "sched_setaffinity #1 -> 0x%lX", ret_affinity);
+ dpr_err("sched_setaffinity #1 -> 0x%lX", ret_affinity);
#endif
- tf_l4sec_clkdm_wakeup(true);
-
workspace_size -= SZ_1M;
sdp_backing_store_addr = workspace_addr + workspace_size;
workspace_size -= 0x20000;
sdp_bkext_store_addr = workspace_addr + workspace_size;
- /*
- * Implementation notes:
- *
- * 1/ The PA buffer (pa_buffer)is now owned by this function.
- * In case of error, it is responsible for releasing the buffer.
- *
- * 2/ The PA Info and PA Buffer will be freed through a RPC call
- * at the beginning of the PA entry in the SE.
- */
if (test_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags)) {
- dprintk(KERN_ERR "tf_start(%p): "
- "The SMC PA is already started\n", comm);
+ dpr_err("%s(%p): The SMC PA is already started\n",
+ __func__, comm);
ret = -EFAULT;
goto error1;
}
if (sizeof(struct tf_l1_shared_buffer) != PAGE_SIZE) {
- dprintk(KERN_ERR "tf_start(%p): "
- "The L1 structure size is incorrect!\n", comm);
+ dpr_err("%s(%p): The L1 structure size is incorrect!\n",
+ __func__, comm);
ret = -EFAULT;
goto error1;
}
@@ -763,31 +709,17 @@ int tf_start(struct tf_comm *comm,
ret = tf_se_init(comm, sdp_backing_store_addr,
sdp_bkext_store_addr);
if (ret != 0) {
- dprintk(KERN_ERR "tf_start(%p): "
- "SE initialization failed\n", comm);
+ dpr_err("%s(%p): SE initialization failed\n", __func__, comm);
goto error1;
}
- init_shared_buffer =
- (struct tf_init_buffer *)
- internal_get_zeroed_page(GFP_KERNEL);
- if (init_shared_buffer == NULL) {
- dprintk(KERN_ERR "tf_start(%p): "
- "Ouf of memory!\n", comm);
-
- ret = -ENOMEM;
- goto error1;
- }
- /* Ensure the page is mapped */
- __set_page_locked(virt_to_page(init_shared_buffer));
l1_shared_buffer =
(struct tf_l1_shared_buffer *)
internal_get_zeroed_page(GFP_KERNEL);
if (l1_shared_buffer == NULL) {
- dprintk(KERN_ERR "tf_start(%p): "
- "Ouf of memory!\n", comm);
+ dpr_err("%s(%p): Ouf of memory!\n", __func__, comm);
ret = -ENOMEM;
goto error1;
@@ -795,73 +727,56 @@ int tf_start(struct tf_comm *comm,
/* Ensure the page is mapped */
__set_page_locked(virt_to_page(l1_shared_buffer));
- dprintk(KERN_INFO "tf_start(%p): "
- "L0SharedBuffer={0x%08x, 0x%08x}\n", comm,
- (u32) init_shared_buffer, (u32) __pa(init_shared_buffer));
- dprintk(KERN_INFO "tf_start(%p): "
- "L1SharedBuffer={0x%08x, 0x%08x}\n", comm,
+ dpr_info("%s(%p): L1SharedBuffer={0x%08x, 0x%08x}\n",
+ __func__, comm,
(u32) l1_shared_buffer, (u32) __pa(l1_shared_buffer));
descr = tf_get_l2_descriptor_common((u32) l1_shared_buffer,
current->mm);
- l1_shared_buffer_descr = (
- ((u32) __pa(l1_shared_buffer) & 0xFFFFF000) |
- (descr & 0xFFF));
+ pa_info.certificate = (void *) workspace_addr;
+ pa_info.parameters = (void *) __pa(l1_shared_buffer);
+ pa_info.results = (void *) __pa(l1_shared_buffer);
- pa_info.certificate = (void *) __pa(pa_buffer);
- pa_info.parameters = (void *) __pa(init_shared_buffer);
- pa_info.results = (void *) __pa(init_shared_buffer);
+ l1_shared_buffer->l1_shared_buffer_descr = descr & 0xFFF;
- init_shared_buffer->l1_shared_buffer_descr = l1_shared_buffer_descr;
+ l1_shared_buffer->backing_store_addr = sdp_backing_store_addr;
+ l1_shared_buffer->backext_storage_addr = sdp_bkext_store_addr;
+ l1_shared_buffer->workspace_addr = workspace_addr;
+ l1_shared_buffer->workspace_size = workspace_size;
- init_shared_buffer->backing_store_addr = sdp_backing_store_addr;
- init_shared_buffer->backext_storage_addr = sdp_bkext_store_addr;
- init_shared_buffer->workspace_addr = workspace_addr;
- init_shared_buffer->workspace_size = workspace_size;
+ dpr_info("%s(%p): System Configuration (%d bytes)\n",
+ __func__, comm, conf_size);
+ dpr_info("%s(%p): Starting PA (%d bytes)...\n",
+ __func__, comm, pa_size);
- init_shared_buffer->properties_length = properties_length;
- if (properties_length == 0) {
- init_shared_buffer->properties_buffer[0] = 0;
- } else {
- /* Test for overflow */
- if ((init_shared_buffer->properties_buffer +
- properties_length
- > init_shared_buffer->properties_buffer) &&
- (properties_length <=
- init_shared_buffer->properties_length)) {
- memcpy(init_shared_buffer->properties_buffer,
- properties_buffer,
- properties_length);
- } else {
- dprintk(KERN_INFO "tf_start(%p): "
- "Configuration buffer size from userland is "
- "incorrect(%d, %d)\n",
- comm, (u32) properties_length,
- init_shared_buffer->properties_length);
- ret = -EFAULT;
- goto error1;
- }
- }
-
- dprintk(KERN_INFO "tf_start(%p): "
- "System Configuration (%d bytes)\n", comm,
- init_shared_buffer->properties_length);
- dprintk(KERN_INFO "tf_start(%p): "
- "Starting PA (%d bytes)...\n", comm, pa_size);
/*
* Make sure all data is visible to the secure world
*/
- dmac_flush_range((void *)init_shared_buffer,
- (void *)(((u32)init_shared_buffer) + PAGE_SIZE));
- outer_clean_range(__pa(init_shared_buffer),
- __pa(init_shared_buffer) + PAGE_SIZE);
-
- dmac_flush_range((void *)pa_buffer,
- (void *)(pa_buffer + pa_size));
- outer_clean_range(__pa(pa_buffer),
- __pa(pa_buffer) + pa_size);
+ dmac_flush_range((void *)l1_shared_buffer,
+ (void *)(((u32)l1_shared_buffer) + PAGE_SIZE));
+ outer_clean_range(__pa(l1_shared_buffer),
+ __pa(l1_shared_buffer) + PAGE_SIZE);
+
+ if (pa_size > workspace_size) {
+ dpr_err("%s(%p): PA size is incorrect (%x)\n",
+ __func__, comm, pa_size);
+ ret = -EFAULT;
+ goto error1;
+ }
+ {
+ void *tmp;
+ tmp = ioremap_nocache(workspace_addr, pa_size);
+ if (copy_from_user(tmp, pa_buffer, pa_size)) {
+ iounmap(tmp);
+ dpr_err("%s(%p): Cannot access PA buffer (%p)\n",
+ __func__, comm, (void *) pa_buffer);
+ ret = -EFAULT;
+ goto error1;
+ }
+ iounmap(tmp);
+ }
dmac_flush_range((void *)&pa_info,
(void *)(((u32)&pa_info) + sizeof(struct tf_ns_pa_info)));
outer_clean_range(__pa(&pa_info),
@@ -869,10 +784,11 @@ int tf_start(struct tf_comm *comm,
wmb();
spin_lock(&(comm->lock));
- comm->init_shared_buffer = init_shared_buffer;
- comm->pBuffer = l1_shared_buffer;
+ comm->l1_buffer = l1_shared_buffer;
+ comm->l1_buffer->conf_descriptor = conf_descriptor;
+ comm->l1_buffer->conf_offset = conf_offset;
+ comm->l1_buffer->conf_size = conf_size;
spin_unlock(&(comm->lock));
- init_shared_buffer = NULL;
l1_shared_buffer = NULL;
/*
@@ -882,8 +798,7 @@ int tf_start(struct tf_comm *comm,
tf_set_current_time(comm);
/* Workaround for issue #6081 */
- if ((omap_rev() && 0xFFF000FF) == OMAP443X_CLASS)
- disable_nonboot_cpus();
+ disable_nonboot_cpus();
/*
* Start the SMC PA
@@ -892,8 +807,7 @@ int tf_start(struct tf_comm *comm,
FLAG_IRQ_ENABLE | FLAG_FIQ_ENABLE | FLAG_START_HAL_CRITICAL, 1,
__pa(&pa_info), 0, 0, 0);
if (ret != API_HAL_RET_VALUE_OK) {
- printk(KERN_ERR "SMC: Error while loading the PA [0x%x]\n",
- ret);
+ pr_err("SMC: Error while loading the PA [0x%x]\n", ret);
goto error2;
}
@@ -902,31 +816,28 @@ loop:
mutex_lock(&(comm->rpc_mutex));
if (g_RPC_advancement == RPC_ADVANCEMENT_PENDING) {
- dprintk(KERN_INFO "tf_rpc_execute: "
- "Executing CMD=0x%x\n",
- g_RPC_parameters[1]);
+ dpr_info("%s: Executing CMD=0x%x\n",
+ __func__, comm->l1_buffer->rpc_command);
- switch (g_RPC_parameters[1]) {
+ switch (comm->l1_buffer->rpc_command) {
case RPC_CMD_YIELD:
- dprintk(KERN_INFO "tf_rpc_execute: "
- "RPC_CMD_YIELD\n");
+ dpr_info("%s: RPC_CMD_YIELD\n", __func__);
set_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED,
&(comm->flags));
- g_RPC_parameters[0] = RPC_SUCCESS;
+ comm->l1_buffer->rpc_status = RPC_SUCCESS;
break;
case RPC_CMD_INIT:
- dprintk(KERN_INFO "tf_rpc_execute: "
- "RPC_CMD_INIT\n");
- g_RPC_parameters[0] = tf_rpc_init(comm);
+ dpr_info("%s: RPC_CMD_INIT\n", __func__);
+ comm->l1_buffer->rpc_status = tf_rpc_init(comm);
break;
case RPC_CMD_TRACE:
- g_RPC_parameters[0] = tf_rpc_trace(comm);
+ comm->l1_buffer->rpc_status = tf_rpc_trace(comm);
break;
default:
- g_RPC_parameters[0] = RPC_ERROR_BAD_PARAMETERS;
+ comm->l1_buffer->rpc_status = RPC_ERROR_BAD_PARAMETERS;
break;
}
g_RPC_advancement = RPC_ADVANCEMENT_FINISHED;
@@ -934,10 +845,9 @@ loop:
mutex_unlock(&(comm->rpc_mutex));
- ret = tf_schedule_secure_world(comm, false);
+ ret = tf_schedule_secure_world(comm);
if (ret != 0) {
- printk(KERN_ERR "SMC: Error while loading the PA [0x%x]\n",
- ret);
+ pr_err("SMC: Error while loading the PA [0x%x]\n", ret);
goto error2;
}
@@ -948,39 +858,21 @@ loop:
wake_up(&(comm->wait_queue));
ret = 0;
- #if 0
- {
- void *workspace_va;
- workspace_va = ioremap(workspace_addr, workspace_size);
- printk(KERN_INFO
- "Read first word of workspace [0x%x]\n",
- *(uint32_t *)workspace_va);
- }
- #endif
-
/* Workaround for issue #6081 */
- if ((omap_rev() && 0xFFF000FF) == OMAP443X_CLASS)
- enable_nonboot_cpus();
+ enable_nonboot_cpus();
goto exit;
error2:
/* Workaround for issue #6081 */
- if ((omap_rev() && 0xFFF000FF) == OMAP443X_CLASS)
- enable_nonboot_cpus();
+ enable_nonboot_cpus();
spin_lock(&(comm->lock));
- l1_shared_buffer = comm->pBuffer;
- init_shared_buffer = comm->init_shared_buffer;
- comm->pBuffer = NULL;
- comm->init_shared_buffer = NULL;
+ l1_shared_buffer = comm->l1_buffer;
+ comm->l1_buffer = NULL;
spin_unlock(&(comm->lock));
error1:
- if (init_shared_buffer != NULL) {
- __clear_page_locked(virt_to_page(init_shared_buffer));
- internal_free_page((unsigned long) init_shared_buffer);
- }
if (l1_shared_buffer != NULL) {
__clear_page_locked(virt_to_page(l1_shared_buffer));
internal_free_page((unsigned long) l1_shared_buffer);
@@ -990,11 +882,9 @@ exit:
#ifdef CONFIG_SMP
ret_affinity = sched_setaffinity(0, &saved_cpu_mask);
if (ret_affinity != 0)
- dprintk(KERN_ERR "sched_setaffinity #2 -> 0x%lX", ret_affinity);
+ dpr_err("sched_setaffinity #2 -> 0x%lX", ret_affinity);
#endif
- tf_l4sec_clkdm_allow_idle(true);
-
if (ret > 0)
ret = -EFAULT;
@@ -1003,7 +893,7 @@ exit:
void tf_terminate(struct tf_comm *comm)
{
- dprintk(KERN_INFO "tf_terminate(%p)\n", comm);
+ dpr_info("%s(%p)\n", __func__, comm);
spin_lock(&(comm->lock));