authorKen Sumrall <ksumrall@android.com>2011-05-17 11:57:50 -0700
committerColin Cross <ccross@android.com>2011-06-14 11:42:28 -0700
commit170572129475692006aba272e05fe9813c7cc5ab (patch)
tree4e256e5790a4f0dbc36c38039b1c7068129146af /security/smc
parentd076b25c4d24b6b8f6eb6844fe81190dcb607dac (diff)
OMAP4 crypto: Add support for omap4 hardware dm-crypt
Add the omap4 crypto driver from Trusted Logic, ported to 2.6.39, with
much of the code removed since we don't intend to run with the
protected app.

Change-Id: I176b27a222b7295b838a954e9b7f5397b23683be
Signed-off-by: Ken Sumrall <ksumrall@android.com>
Diffstat (limited to 'security/smc')
-rw-r--r--  security/smc/Kconfig                            45
-rw-r--r--  security/smc/Makefile                            3
-rw-r--r--  security/smc/omap4/Makefile                     35
-rw-r--r--  security/smc/omap4/scx_protocol.h              676
-rw-r--r--  security/smc/omap4/scx_public_crypto.c         355
-rw-r--r--  security/smc/omap4/scx_public_crypto.h         348
-rw-r--r--  security/smc/omap4/scx_public_crypto_AES.c    1180
-rw-r--r--  security/smc/omap4/scx_public_crypto_Digest.c  964
-rw-r--r--  security/smc/omap4/scx_public_dma.c            137
-rw-r--r--  security/smc/omap4/scx_public_dma.h             78
-rw-r--r--  security/smc/omap4/scxlnx_comm_mshield.c        97
-rw-r--r--  security/smc/omap4/scxlnx_defs.h               539
-rw-r--r--  security/smc/omap4/scxlnx_device.c              89
-rw-r--r--  security/smc/omap4/scxlnx_mshield.h             44
-rw-r--r--  security/smc/omap4/scxlnx_util.c                45
-rw-r--r--  security/smc/omap4/scxlnx_util.h               102
16 files changed, 4737 insertions, 0 deletions
diff --git a/security/smc/Kconfig b/security/smc/Kconfig
new file mode 100644
index 0000000..9fcd1f6
--- /dev/null
+++ b/security/smc/Kconfig
@@ -0,0 +1,45 @@
+config TF_MSHIELD
+ bool
+
+config SECURITY_MIDDLEWARE_COMPONENT
+ bool "Enable SMC Driver"
+ depends on ARCH_OMAP3 || ARCH_OMAP4
+ default n
+ select TF_MSHIELD
+ help
+ This option adds kernel support for communication with the SMC
+ Protected Application.
+
+ If you are unsure how to answer this question, answer N.
+
+config SMC_KERNEL_CRYPTO
+ bool "Register SMC into kernel crypto subsystem"
+ depends on SECURITY_MIDDLEWARE_COMPONENT
+ default y
+ help
+ This option enables the kernel crypto subsystem to use the SMC and
+ OMAP hardware accelerators.
+
+ If you are unsure how to answer this question, answer Y.
+
+config SECURE_TRACE
+ bool "Enable SMC secure traces"
+ depends on SECURITY_MIDDLEWARE_COMPONENT && ARCH_OMAP4
+ default y
+ help
+ This option enables traces from the SMC Protected Application to be
+ displayed in kernel logs.
+
+config TF_DRIVER_DEBUG_SUPPORT
+ bool "Debug support"
+ depends on SECURITY_MIDDLEWARE_COMPONENT
+ default n
+ help
+ This option enables debug traces in the driver.
+
+config SMC_BENCH_SECURE_CYCLE
+ bool "Enable secure cycles benchmarks"
+ depends on TF_DRIVER_DEBUG_SUPPORT && ARCH_OMAP4
+ default n
+ help
+ This option enables secure cycle benchmarks.
diff --git a/security/smc/Makefile b/security/smc/Makefile
new file mode 100644
index 0000000..80cf430
--- /dev/null
+++ b/security/smc/Makefile
@@ -0,0 +1,3 @@
+ifeq ($(CONFIG_SECURITY_MIDDLEWARE_COMPONENT),y)
+obj-$(CONFIG_ARCH_OMAP4) += omap4/
+endif
diff --git a/security/smc/omap4/Makefile b/security/smc/omap4/Makefile
new file mode 100644
index 0000000..de75cc2
--- /dev/null
+++ b/security/smc/omap4/Makefile
@@ -0,0 +1,35 @@
+#
+# Copyright (c) 2006-2010 Trusted Logic S.A.
+# All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2 of
+# the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+# MA 02111-1307 USA
+#
+
+ifdef S_VERSION_BUILD
+EXTRA_CFLAGS += -DS_VERSION_BUILD=$(S_VERSION_BUILD)
+endif
+
+EXTRA_CFLAGS += -Iarch/arm/mach-omap2
+
+tf_driver-objs += scxlnx_util.o
+tf_driver-objs += scxlnx_device.o
+tf_driver-objs += scx_public_crypto.o
+tf_driver-objs += scx_public_crypto_Digest.o
+tf_driver-objs += scx_public_crypto_AES.o
+tf_driver-objs += scx_public_dma.o
+tf_driver-objs += scxlnx_comm_mshield.o
+
+obj-$(CONFIG_SECURITY_MIDDLEWARE_COMPONENT) += tf_driver.o
diff --git a/security/smc/omap4/scx_protocol.h b/security/smc/omap4/scx_protocol.h
new file mode 100644
index 0000000..80653eb
--- /dev/null
+++ b/security/smc/omap4/scx_protocol.h
@@ -0,0 +1,676 @@
+/*
+ * Copyright (c) 2006-2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __SCX_PROTOCOL_H__
+#define __SCX_PROTOCOL_H__
+
+/*----------------------------------------------------------------------------
+ *
+ * This header file defines the structures used in the SChannel protocol.
+ * See your Product Reference Manual for a specification of the SChannel
+ * protocol.
+ *---------------------------------------------------------------------------*/
+
+/*
+ * The driver interface version returned by the version ioctl
+ */
+#define SCX_DRIVER_INTERFACE_VERSION 0x04000000
+
+/*
+ * Protocol version handling
+ */
+#define SCX_S_PROTOCOL_MAJOR_VERSION (0x06)
+#define GET_PROTOCOL_MAJOR_VERSION(a) (a >> 24)
+#define GET_PROTOCOL_MINOR_VERSION(a) ((a >> 16) & 0xFF)
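(Editor's illustration, not part of the patch: the protocol version word packs
the major version in bits 31..24 and the minor version in bits 23..16, so the
accessors above can be used as in this minimal sketch.)

	u32 version = 0x06010000;                        /* major 0x06, minor 0x01 */
	u32 major = GET_PROTOCOL_MAJOR_VERSION(version); /* 0x06 */
	u32 minor = GET_PROTOCOL_MINOR_VERSION(version); /* 0x01 */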
+
+/*
+ * The size, in bytes, of the L1 Shared Buffer.
+ */
+#define SCX_COMM_BUFFER_SIZE (0x1000) /* 4kB*/
+
+/*
+ * The S flag of the nConfigFlags_S register.
+ */
+#define SCX_CONFIG_FLAG_S (1 << 3)
+
+/*
+ * The TimeSlot field of the nSyncSerial_N register.
+ */
+#define SCX_SYNC_SERIAL_TIMESLOT_N (1)
+
+/*
+ * nStatus_S related defines.
+ */
+#define SCX_STATUS_P_MASK (0X00000001)
+#define SCX_STATUS_POWER_STATE_SHIFT (3)
+#define SCX_STATUS_POWER_STATE_MASK (0x1F << SCX_STATUS_POWER_STATE_SHIFT)
+
+/*
+ * Possible power states of the POWER_STATE field of the nStatus_S register
+ */
+#define SCX_POWER_MODE_COLD_BOOT (0)
+#define SCX_POWER_MODE_WARM_BOOT (1)
+#define SCX_POWER_MODE_ACTIVE (3)
+#define SCX_POWER_MODE_READY_TO_SHUTDOWN (5)
+#define SCX_POWER_MODE_READY_TO_HIBERNATE (7)
+#define SCX_POWER_MODE_WAKEUP (8)
+#define SCX_POWER_MODE_PANIC (15)
+
+/*
+ * Possible nCommand values for MANAGEMENT commands
+ */
+#define SCX_MANAGEMENT_HIBERNATE (1)
+#define SCX_MANAGEMENT_SHUTDOWN (2)
+#define SCX_MANAGEMENT_PREPARE_FOR_CORE_OFF (3)
+#define SCX_MANAGEMENT_RESUME_FROM_CORE_OFF (4)
+
+/*
+ * The capacity of the Normal Word message queue, in number of slots.
+ */
+#define SCX_N_MESSAGE_QUEUE_CAPACITY (512)
+
+/*
+ * The capacity of the Secure World message answer queue, in number of slots.
+ */
+#define SCX_S_ANSWER_QUEUE_CAPACITY (256)
+
+/*
+ * The value of the S-timeout register indicating an infinite timeout.
+ */
+#define SCX_S_TIMEOUT_0_INFINITE (0xFFFFFFFF)
+#define SCX_S_TIMEOUT_1_INFINITE (0xFFFFFFFF)
+
+/*
+ * The value of the S-timeout register indicating an immediate timeout.
+ */
+#define SCX_S_TIMEOUT_0_IMMEDIATE (0x0)
+#define SCX_S_TIMEOUT_1_IMMEDIATE (0x0)
+
+/*
+ * Identifies the get protocol version SMC.
+ */
+#define SCX_SMC_GET_PROTOCOL_VERSION (0XFFFFFFFB)
+
+/*
+ * Identifies the init SMC.
+ */
+#define SCX_SMC_INIT (0XFFFFFFFF)
+
+/*
+ * Identifies the reset irq SMC.
+ */
+#define SCX_SMC_RESET_IRQ (0xFFFFFFFE)
+
+/*
+ * Identifies the SET_W3B SMC.
+ */
+#define SCX_SMC_WAKE_UP (0xFFFFFFFD)
+
+/*
+ * Identifies the STOP SMC.
+ */
+#define SCX_SMC_STOP (0xFFFFFFFC)
+
+/*
+ * Identifies the n-yield SMC.
+ */
+#define SCX_SMC_N_YIELD (0X00000003)
+
+
+/* Possible stop commands for SMC_STOP */
+#define SCSTOP_HIBERNATE (0xFFFFFFE1)
+#define SCSTOP_SHUTDOWN (0xFFFFFFE2)
+
+/*
+ * Representation of a UUID.
+ */
+struct SCX_UUID {
+ u32 time_low;
+ u16 time_mid;
+ u16 time_hi_and_version;
+ u8 clock_seq_and_node[8];
+};
+
+
+/**
+ * Command parameters.
+ */
+struct SCX_COMMAND_PARAM_VALUE {
+ u32 a;
+ u32 b;
+};
+
+struct SCX_COMMAND_PARAM_TEMP_MEMREF {
+ u32 nDescriptor; /* data pointer for exchange message.*/
+ u32 nSize;
+ u32 nOffset;
+};
+
+struct SCX_COMMAND_PARAM_MEMREF {
+ u32 hBlock;
+ u32 nSize;
+ u32 nOffset;
+};
+
+union SCX_COMMAND_PARAM {
+ struct SCX_COMMAND_PARAM_VALUE sValue;
+ struct SCX_COMMAND_PARAM_TEMP_MEMREF sTempMemref;
+ struct SCX_COMMAND_PARAM_MEMREF sMemref;
+};
+
+/**
+ * Answer parameters.
+ */
+struct SCX_ANSWER_PARAM_VALUE {
+ u32 a;
+ u32 b;
+};
+
+struct SCX_ANSWER_PARAM_SIZE {
+ u32 _ignored;
+ u32 nSize;
+};
+
+union SCX_ANSWER_PARAM {
+ struct SCX_ANSWER_PARAM_SIZE sSize;
+ struct SCX_ANSWER_PARAM_VALUE sValue;
+};
+
+/*
+ * Descriptor tables capacity
+ */
+#define SCX_MAX_W3B_COARSE_PAGES (2)
+#define SCX_MAX_COARSE_PAGES (8)
+#define SCX_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT (8)
+#define SCX_DESCRIPTOR_TABLE_CAPACITY \
+ (1 << SCX_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT)
+#define SCX_DESCRIPTOR_TABLE_CAPACITY_MASK \
+ (SCX_DESCRIPTOR_TABLE_CAPACITY - 1)
+/* Shared memories coarse pages can map up to 1MB */
+#define SCX_MAX_COARSE_PAGE_MAPPED_SIZE \
+ (PAGE_SIZE * SCX_DESCRIPTOR_TABLE_CAPACITY)
+/* Shared memories cannot exceed 8MB */
+#define SCX_MAX_SHMEM_SIZE \
+ (SCX_MAX_COARSE_PAGE_MAPPED_SIZE << 3)
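Worked numbers for the limits above, as an editor's sketch assuming 4 KB pages
(PAGE_SIZE is platform-defined, so this is an assumption):

	/*
	 * Assuming PAGE_SIZE == 4096:
	 *   SCX_DESCRIPTOR_TABLE_CAPACITY   = 1 << 8     = 256 descriptors
	 *   SCX_MAX_COARSE_PAGE_MAPPED_SIZE = 4096 * 256 = 1 MB per coarse page
	 *   SCX_MAX_SHMEM_SIZE              = 1 MB << 3  = 8 MB per shared memory
	 */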
+
+/*
+ * Buffer size for version description fields
+ */
+#define SCX_DESCRIPTION_BUFFER_LENGTH 64
+
+/*
+ * Shared memory type flags.
+ */
+#define SCX_SHMEM_TYPE_READ (0x00000001)
+#define SCX_SHMEM_TYPE_WRITE (0x00000002)
+
+/*
+ * Shared mem flags
+ */
+#define SCX_SHARED_MEM_FLAG_INPUT 1
+#define SCX_SHARED_MEM_FLAG_OUTPUT 2
+#define SCX_SHARED_MEM_FLAG_INOUT 3
+
+
+/*
+ * Parameter types
+ */
+#define SCX_PARAM_TYPE_NONE 0x0
+#define SCX_PARAM_TYPE_VALUE_INPUT 0x1
+#define SCX_PARAM_TYPE_VALUE_OUTPUT 0x2
+#define SCX_PARAM_TYPE_VALUE_INOUT 0x3
+#define SCX_PARAM_TYPE_MEMREF_TEMP_INPUT 0x5
+#define SCX_PARAM_TYPE_MEMREF_TEMP_OUTPUT 0x6
+#define SCX_PARAM_TYPE_MEMREF_TEMP_INOUT 0x7
+#define SCX_PARAM_TYPE_MEMREF_INPUT 0xD
+#define SCX_PARAM_TYPE_MEMREF_OUTPUT 0xE
+#define SCX_PARAM_TYPE_MEMREF_INOUT 0xF
+
+#define SCX_PARAM_TYPE_MEMREF_FLAG 0x4
+#define SCX_PARAM_TYPE_REGISTERED_MEMREF_FLAG 0x8
+
+
+#define SCX_MAKE_PARAM_TYPES(t0, t1, t2, t3) \
+ ((t0) | ((t1) << 4) | ((t2) << 8) | ((t3) << 12))
+#define SCX_GET_PARAM_TYPE(t, i) (((t) >> (4 * i)) & 0xF)
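A minimal sketch (not part of the patch) of how the packing macros above
combine four parameter types, here for a command whose first parameter is an
input value and whose second is a registered input memref:

	u16 nParamTypes = SCX_MAKE_PARAM_TYPES(
		SCX_PARAM_TYPE_VALUE_INPUT,   /* slot 0 -> bits 3..0 */
		SCX_PARAM_TYPE_MEMREF_INPUT,  /* slot 1 -> bits 7..4 */
		SCX_PARAM_TYPE_NONE,
		SCX_PARAM_TYPE_NONE);
	/* SCX_GET_PARAM_TYPE(nParamTypes, 1) == SCX_PARAM_TYPE_MEMREF_INPUT */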
+
+/*
+ * Login types.
+ */
+#define SCX_LOGIN_PUBLIC 0x00000000
+#define SCX_LOGIN_USER 0x00000001
+#define SCX_LOGIN_GROUP 0x00000002
+#define SCX_LOGIN_APPLICATION 0x00000004
+#define SCX_LOGIN_APPLICATION_USER 0x00000005
+#define SCX_LOGIN_APPLICATION_GROUP 0x00000006
+#define SCX_LOGIN_AUTHENTICATION 0x80000000
+#define SCX_LOGIN_PRIVILEGED 0x80000002
+
+/* Login variants */
+
+#define SCX_LOGIN_VARIANT(mainType, os, variant) \
+ ((mainType) | (1 << 27) | ((os) << 16) | ((variant) << 8))
+
+#define SCX_LOGIN_GET_MAIN_TYPE(type) \
+ ((type) & ~SCX_LOGIN_VARIANT(0, 0xFF, 0xFF))
+
+#define SCX_LOGIN_OS_ANY 0x00
+#define SCX_LOGIN_OS_LINUX 0x01
+#define SCX_LOGIN_OS_ANDROID 0x04
+
+/* OS-independent variants */
+#define SCX_LOGIN_USER_NONE \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_USER, SCX_LOGIN_OS_ANY, 0xFF)
+#define SCX_LOGIN_GROUP_NONE \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_GROUP, SCX_LOGIN_OS_ANY, 0xFF)
+#define SCX_LOGIN_APPLICATION_USER_NONE \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION_USER, SCX_LOGIN_OS_ANY, 0xFF)
+#define SCX_LOGIN_AUTHENTICATION_BINARY_SHA1_HASH \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_AUTHENTICATION, SCX_LOGIN_OS_ANY, 0x01)
+#define SCX_LOGIN_PRIVILEGED_KERNEL \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_PRIVILEGED, SCX_LOGIN_OS_ANY, 0x01)
+
+/* Linux variants */
+#define SCX_LOGIN_USER_LINUX_EUID \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_USER, SCX_LOGIN_OS_LINUX, 0x01)
+#define SCX_LOGIN_GROUP_LINUX_GID \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_GROUP, SCX_LOGIN_OS_LINUX, 0x01)
+#define SCX_LOGIN_APPLICATION_LINUX_PATH_SHA1_HASH \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION, SCX_LOGIN_OS_LINUX, 0x01)
+#define SCX_LOGIN_APPLICATION_USER_LINUX_PATH_EUID_SHA1_HASH \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION_USER, SCX_LOGIN_OS_LINUX, 0x01)
+#define SCX_LOGIN_APPLICATION_GROUP_LINUX_PATH_GID_SHA1_HASH \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION_GROUP, SCX_LOGIN_OS_LINUX, 0x01)
+
+/* Android variants */
+#define SCX_LOGIN_USER_ANDROID_EUID \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_USER, SCX_LOGIN_OS_ANDROID, 0x01)
+#define SCX_LOGIN_GROUP_ANDROID_GID \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_GROUP, SCX_LOGIN_OS_ANDROID, 0x01)
+#define SCX_LOGIN_APPLICATION_ANDROID_UID \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION, SCX_LOGIN_OS_ANDROID, 0x01)
+#define SCX_LOGIN_APPLICATION_USER_ANDROID_UID_EUID \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION_USER, SCX_LOGIN_OS_ANDROID, \
+ 0x01)
+#define SCX_LOGIN_APPLICATION_GROUP_ANDROID_UID_GID \
+ SCX_LOGIN_VARIANT(SCX_LOGIN_APPLICATION_GROUP, SCX_LOGIN_OS_ANDROID, \
+ 0x01)
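As an illustration of the encoding (editor's sketch), SCX_LOGIN_USER_LINUX_EUID
expands to 0x00000001 | (1 << 27) | (0x01 << 16) | (0x01 << 8) = 0x08010101,
and SCX_LOGIN_GET_MAIN_TYPE() masks the OS and variant fields back out:

	u32 nLoginType = SCX_LOGIN_USER_LINUX_EUID;            /* 0x08010101 */
	u32 nMainType  = SCX_LOGIN_GET_MAIN_TYPE(nLoginType);  /* SCX_LOGIN_USER */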
+
+/*
+ * return origins
+ */
+#define SCX_ORIGIN_COMMS 2
+#define SCX_ORIGIN_TEE 3
+#define SCX_ORIGIN_TRUSTED_APP 4
+/*
+ * The SCX message types.
+ */
+#define SCX_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT 0x02
+#define SCX_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT 0xFD
+#define SCX_MESSAGE_TYPE_REGISTER_SHARED_MEMORY 0xF7
+#define SCX_MESSAGE_TYPE_RELEASE_SHARED_MEMORY 0xF9
+#define SCX_MESSAGE_TYPE_OPEN_CLIENT_SESSION 0xF0
+#define SCX_MESSAGE_TYPE_CLOSE_CLIENT_SESSION 0xF2
+#define SCX_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND 0xF5
+#define SCX_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND 0xF4
+#define SCX_MESSAGE_TYPE_MANAGEMENT 0xFE
+
+
+/*
+ * The error codes
+ */
+#define S_SUCCESS 0x00000000
+#define S_ERROR_NO_DATA 0xFFFF000B
+#define S_ERROR_OUT_OF_MEMORY 0xFFFF000C
+
+
+struct SCX_COMMAND_HEADER {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMessageInfo;
+ u32 nOperationID;
+};
+
+struct SCX_ANSWER_HEADER {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMessageInfo;
+ u32 nOperationID;
+ u32 nErrorCode;
+};
+
+/*
+ * CREATE_DEVICE_CONTEXT command message.
+ */
+struct SCX_COMMAND_CREATE_DEVICE_CONTEXT {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMessageInfo_RFU;
+ u32 nOperationID;
+ u32 nDeviceContextID;
+};
+
+/*
+ * CREATE_DEVICE_CONTEXT answer message.
+ */
+struct SCX_ANSWER_CREATE_DEVICE_CONTEXT {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMessageInfo_RFU;
+ /* an opaque Normal World identifier for the operation */
+ u32 nOperationID;
+ u32 nErrorCode;
+ /* an opaque Normal World identifier for the device context */
+ u32 hDeviceContext;
+};
+
+/*
+ * DESTROY_DEVICE_CONTEXT command message.
+ */
+struct SCX_COMMAND_DESTROY_DEVICE_CONTEXT {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMessageInfo_RFU;
+ u32 nOperationID;
+ u32 hDeviceContext;
+};
+
+/*
+ * DESTROY_DEVICE_CONTEXT answer message.
+ */
+struct SCX_ANSWER_DESTROY_DEVICE_CONTEXT {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMessageInfo_RFU;
+ /* an opaque Normal World identifier for the operation */
+ u32 nOperationID;
+ u32 nErrorCode;
+ u32 nDeviceContextID;
+};
+
+/*
+ * OPEN_CLIENT_SESSION command message.
+ */
+struct SCX_COMMAND_OPEN_CLIENT_SESSION {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nParamTypes;
+ /* an opaque Normal World identifier for the operation */
+ u32 nOperationID;
+ u32 hDeviceContext;
+ u32 nCancellationID;
+ u64 sTimeout;
+ struct SCX_UUID sDestinationUUID;
+ union SCX_COMMAND_PARAM sParams[4];
+ u32 nLoginType;
+ /*
+ * Size = 0 for public, [16] for group identification, [20] for
+ * authentication
+ */
+ u8 sLoginData[20];
+};
+
+/*
+ * OPEN_CLIENT_SESSION answer message.
+ */
+struct SCX_ANSWER_OPEN_CLIENT_SESSION {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u8 nReturnOrigin;
+ u8 __nReserved;
+ /* an opaque Normal World identifier for the operation */
+ u32 nOperationID;
+ u32 nErrorCode;
+ u32 hClientSession;
+ union SCX_ANSWER_PARAM sAnswers[4];
+};
+
+/*
+ * CLOSE_CLIENT_SESSION command message.
+ */
+struct SCX_COMMAND_CLOSE_CLIENT_SESSION {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMessageInfo_RFU;
+ /* an opaque Normal World identifier for the operation */
+ u32 nOperationID;
+ u32 hDeviceContext;
+ u32 hClientSession;
+};
+
+/*
+ * CLOSE_CLIENT_SESSION answer message.
+ */
+struct SCX_ANSWER_CLOSE_CLIENT_SESSION {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMessageInfo_RFU;
+ /* an opaque Normal World identifier for the operation */
+ u32 nOperationID;
+ u32 nErrorCode;
+};
+
+
+/*
+ * REGISTER_SHARED_MEMORY command message
+ */
+struct SCX_COMMAND_REGISTER_SHARED_MEMORY {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMemoryFlags;
+ u32 nOperationID;
+ u32 hDeviceContext;
+ u32 nBlockID;
+ u32 nSharedMemSize;
+ u32 nSharedMemStartOffset;
+ u32 nSharedMemDescriptors[SCX_MAX_COARSE_PAGES];
+};
+
+/*
+ * REGISTER_SHARED_MEMORY answer message.
+ */
+struct SCX_ANSWER_REGISTER_SHARED_MEMORY {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMessageInfo_RFU;
+ /* an opaque Normal World identifier for the operation */
+ u32 nOperationID;
+ u32 nErrorCode;
+ u32 hBlock;
+};
+
+/*
+ * RELEASE_SHARED_MEMORY command message.
+ */
+struct SCX_COMMAND_RELEASE_SHARED_MEMORY {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMessageInfo_RFU;
+ /* an opaque Normal World identifier for the operation */
+ u32 nOperationID;
+ u32 hDeviceContext;
+ u32 hBlock;
+};
+
+/*
+ * RELEASE_SHARED_MEMORY answer message.
+ */
+struct SCX_ANSWER_RELEASE_SHARED_MEMORY {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMessageInfo_RFU;
+ u32 nOperationID;
+ u32 nErrorCode;
+ u32 nBlockID;
+};
+
+/*
+ * INVOKE_CLIENT_COMMAND command message.
+ */
+struct SCX_COMMAND_INVOKE_CLIENT_COMMAND {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nParamTypes;
+ u32 nOperationID;
+ u32 hDeviceContext;
+ u32 hClientSession;
+ u64 sTimeout;
+ u32 nCancellationID;
+ u32 nClientCommandIdentifier;
+ union SCX_COMMAND_PARAM sParams[4];
+};
+
+/*
+ * INVOKE_CLIENT_COMMAND command answer.
+ */
+struct SCX_ANSWER_INVOKE_CLIENT_COMMAND {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u8 nReturnOrigin;
+ u8 __nReserved;
+ u32 nOperationID;
+ u32 nErrorCode;
+ union SCX_ANSWER_PARAM sAnswers[4];
+};
+
+/*
+ * CANCEL_CLIENT_OPERATION command message.
+ */
+struct SCX_COMMAND_CANCEL_CLIENT_OPERATION {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMessageInfo_RFU;
+ /* an opaque Normal World identifier for the operation */
+ u32 nOperationID;
+ u32 hDeviceContext;
+ u32 hClientSession;
+ u32 nCancellationID;
+};
+
+struct SCX_ANSWER_CANCEL_CLIENT_OPERATION {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nMessageInfo_RFU;
+ u32 nOperationID;
+ u32 nErrorCode;
+};
+
+/*
+ * MANAGEMENT command message.
+ */
+struct SCX_COMMAND_MANAGEMENT {
+ u8 nMessageSize;
+ u8 nMessageType;
+ u16 nCommand;
+ u32 nOperationID;
+ u32 nW3BSize;
+ u32 nW3BStartOffset;
+ u32 nSharedMemDescriptors[1];
+};
+
+/*
+ * POWER_MANAGEMENT answer message.
+ * The message does not provide message specific parameters.
+ * Therefore no need to define a specific answer structure
+ */
+
+/*
+ * Structure for L2 messages
+ */
+union SCX_COMMAND_MESSAGE {
+ struct SCX_COMMAND_HEADER sHeader;
+ struct SCX_COMMAND_CREATE_DEVICE_CONTEXT sCreateDeviceContextMessage;
+ struct SCX_COMMAND_DESTROY_DEVICE_CONTEXT sDestroyDeviceContextMessage;
+ struct SCX_COMMAND_OPEN_CLIENT_SESSION sOpenClientSessionMessage;
+ struct SCX_COMMAND_CLOSE_CLIENT_SESSION sCloseClientSessionMessage;
+ struct SCX_COMMAND_REGISTER_SHARED_MEMORY sRegisterSharedMemoryMessage;
+ struct SCX_COMMAND_RELEASE_SHARED_MEMORY sReleaseSharedMemoryMessage;
+ struct SCX_COMMAND_INVOKE_CLIENT_COMMAND sInvokeClientCommandMessage;
+ struct SCX_COMMAND_CANCEL_CLIENT_OPERATION
+ sCancelClientOperationMessage;
+ struct SCX_COMMAND_MANAGEMENT sManagementMessage;
+};
+
+/*
+ * Structure for any L2 answer
+ */
+
+union SCX_ANSWER_MESSAGE {
+ struct SCX_ANSWER_HEADER sHeader;
+ struct SCX_ANSWER_CREATE_DEVICE_CONTEXT sCreateDeviceContextAnswer;
+ struct SCX_ANSWER_OPEN_CLIENT_SESSION sOpenClientSessionAnswer;
+ struct SCX_ANSWER_CLOSE_CLIENT_SESSION sCloseClientSessionAnswer;
+ struct SCX_ANSWER_REGISTER_SHARED_MEMORY sRegisterSharedMemoryAnswer;
+ struct SCX_ANSWER_RELEASE_SHARED_MEMORY sReleaseSharedMemoryAnswer;
+ struct SCX_ANSWER_INVOKE_CLIENT_COMMAND sInvokeClientCommandAnswer;
+ struct SCX_ANSWER_DESTROY_DEVICE_CONTEXT sDestroyDeviceContextAnswer;
+ struct SCX_ANSWER_CANCEL_CLIENT_OPERATION sCancelClientOperationAnswer;
+};
+
+/* Structure of the Communication Buffer */
+struct SCHANNEL_C1S_BUFFER {
+ u32 nConfigFlags_S;
+ u32 nW3BSizeMax_S;
+ u32 nReserved0;
+ u32 nW3BSizeCurrent_S;
+ u8 sReserved1[48];
+ u8 sVersionDescription[SCX_DESCRIPTION_BUFFER_LENGTH];
+ u32 nStatus_S;
+ u32 sReserved2;
+ u32 nSyncSerial_N;
+ u32 nSyncSerial_S;
+ u64 sTime_N[2];
+ u64 sTimeout_S[2];
+ u32 nFirstCommand;
+ u32 nFirstFreeCommand;
+ u32 nFirstAnswer;
+ u32 nFirstFreeAnswer;
+ u32 nW3BDescriptors[128];
+ #ifdef CONFIG_TF_MSHIELD
+ u8 sRPCTraceBuffer[140];
+ u8 sRPCShortcutBuffer[180];
+ #else
+ u8 sReserved3[320];
+ #endif
+ u32 sCommandQueue[SCX_N_MESSAGE_QUEUE_CAPACITY];
+ u32 sAnswerQueue[SCX_S_ANSWER_QUEUE_CAPACITY];
+};
+
+
+/*
+ * SCX_VERSION_INFORMATION_BUFFER structure description
+ * Description of the sVersionBuffer handed over from user space to kernel space
+ * This field is filled by the driver during a CREATE_DEVICE_CONTEXT ioctl
+ * and handed back to user space
+ */
+struct SCX_VERSION_INFORMATION_BUFFER {
+ u8 sDriverDescription[65];
+ u8 sSecureWorldDescription[65];
+};
+
+
+/* The IOCTLs the driver supports */
+#include <linux/ioctl.h>
+
+#define IOCTL_SCX_GET_VERSION _IO('z', 0)
+#define IOCTL_SCX_EXCHANGE _IOWR('z', 1, union SCX_COMMAND_MESSAGE)
+#define IOCTL_SCX_GET_DESCRIPTION _IOR('z', 2, \
+ struct SCX_VERSION_INFORMATION_BUFFER)
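A hedged user-space sketch of driving these ioctls; the device node name and
the error handling are assumptions rather than something this header defines,
and the kernel integer types (u8, u32) must be made visible to user space by
other means:

	/* Editor's sketch (user space); device node name is an assumption. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	static void scx_print_description(void)
	{
		struct SCX_VERSION_INFORMATION_BUFFER info;
		int fd = open("/dev/tf_driver", O_RDWR);

		if (fd < 0)
			return;
		if (ioctl(fd, IOCTL_SCX_GET_DESCRIPTION, &info) == 0)
			printf("driver: %s\n", (char *)info.sDriverDescription);
		close(fd);
	}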
+
+#endif /* !defined(__SCX_PROTOCOL_H__) */
diff --git a/security/smc/omap4/scx_public_crypto.c b/security/smc/omap4/scx_public_crypto.c
new file mode 100644
index 0000000..d6b751c
--- /dev/null
+++ b/security/smc/omap4/scx_public_crypto.c
@@ -0,0 +1,355 @@
+/*
+ * Copyright (c) 2006-2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "scxlnx_defs.h"
+#include "scxlnx_util.h"
+#include "scxlnx_mshield.h"
+#include "scx_public_crypto.h"
+#include "scx_public_dma.h"
+
+#define IO_ADDRESS OMAP2_L4_IO_ADDRESS
+
+#define S_SUCCESS 0x00000000
+#define S_ERROR_GENERIC 0xFFFF0000
+#define S_ERROR_ACCESS_DENIED 0xFFFF0001
+#define S_ERROR_BAD_FORMAT 0xFFFF0005
+#define S_ERROR_BAD_PARAMETERS 0xFFFF0006
+#define S_ERROR_OUT_OF_MEMORY 0xFFFF000C
+#define S_ERROR_SHORT_BUFFER 0xFFFF0010
+#define S_ERROR_UNREACHABLE 0xFFFF3013
+#define S_ERROR_SERVICE 0xFFFF1000
+
+#define CKR_OK 0x00000000
+
+#define PUBLIC_CRYPTO_TIMEOUT_CONST 0x000FFFFF
+
+#define RPC_AES1_CODE PUBLIC_CRYPTO_HWA_AES1
+#define RPC_AES2_CODE PUBLIC_CRYPTO_HWA_AES2
+#define RPC_DES_CODE PUBLIC_CRYPTO_HWA_DES
+#define RPC_SHA_CODE PUBLIC_CRYPTO_HWA_SHA
+
+#define RPC_CRYPTO_COMMAND_MASK 0x000003c0
+
+#define RPC_INSTALL_SHORTCUT_LOCK_ACCELERATOR 0x200
+#define RPC_INSTALL_SHORTCUT_LOCK_ACCELERATOR_UNLOCK 0x000
+#define RPC_INSTALL_SHORTCUT_LOCK_ACCELERATOR_LOCK 0x001
+
+#define RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT 0x240
+#define RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_LOCK_AES1 RPC_AES1_CODE
+#define RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_LOCK_AES2 RPC_AES2_CODE
+#define RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_LOCK_DES RPC_DES_CODE
+#define RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_LOCK_SHA RPC_SHA_CODE
+#define RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_SUSPEND 0x010
+#define RPC_LOCK_ACCELERATORS_SUSPEND_SHORTCUT_UNINSTALL 0x020
+
+#define RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS 0x280
+#define RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_UNLOCK_AES1 RPC_AES1_CODE
+#define RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_UNLOCK_AES2 RPC_AES2_CODE
+#define RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_UNLOCK_DES RPC_DES_CODE
+#define RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_UNLOCK_SHA RPC_SHA_CODE
+#define RPC_RESUME_SHORTCUT_UNLOCK_ACCELERATORS_RESUME 0x010
+
+#define RPC_CLEAR_GLOBAL_KEY_CONTEXT 0x2c0
+#define RPC_CLEAR_GLOBAL_KEY_CONTEXT_CLEARED_AES 0x001
+#define RPC_CLEAR_GLOBAL_KEY_CONTEXT_CLEARED_DES 0x002
+
+#define ENABLE_CLOCK true
+#define DISABLE_CLOCK false
+
+/*---------------------------------------------------------------------------*/
+/*RPC IN/OUT structures for CUS implementation */
+/*---------------------------------------------------------------------------*/
+
+struct RPC_INSTALL_SHORTCUT_LOCK_ACCELERATOR_OUT {
+ u32 nShortcutID;
+ u32 nError;
+};
+
+struct RPC_INSTALL_SHORTCUT_LOCK_ACCELERATOR_IN {
+ u32 nDeviceContextID;
+ u32 hClientSession;
+ u32 nCommandID;
+ u32 hKeyContext;
+ /**
+ *The identifier of the HWA that this shortcut uses.
+ *Possible values are:
+ *- 1 (RPC_AES1_CODE)
+ *- 2 (RPC_AES2_CODE)
+ *- 4 (RPC_DES_CODE)
+ *- 8 (RPC_SHA_CODE)
+ **/
+ u32 nHWAID;
+ /**
+ *This field defines the algorithm, direction, mode, key size.
+ *It contains some of the bits of the corresponding "CTRL" register
+ *of the accelerator.
+ *
+ *More precisely:
+ *For AES1 accelerator, nHWA_CTRL contains the following bits:
+ *- CTR (bit 6):
+ * when 1, selects CTR mode.
+ * when 0, selects CBC or ECB mode (according to CBC bit)
+ *- CBC (bit 5)
+ * when 1, selects CBC mode (but only if CTR=0)
+ * when 0, selects ECB mode (but only if CTR=0)
+ *- DIRECTION (bit 2)
+ * 0: decryption
+ * 1: encryption
+ *
+ *For the DES2 accelerator, nHWA_CTRL contains the following bits:
+ *- CBC (bit 4): 1 for CBC, 0 for ECB
+ *- DIRECTION (bit 2): 0 for decryption, 1 for encryption
+ *
+ *For the SHA accelerator, nHWA_CTRL contains the following bits:
+ *- ALGO (bit 2:1):
+ * 0x0: MD5
+ * 0x1: SHA1
+ * 0x2: SHA-224
+ * 0x3: SHA-256
+ **/
+ u32 nHWA_CTRL;
+ union PUBLIC_CRYPTO_OPERATION_STATE sOperationState;
+};
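For example (editor's sketch), an AES1 shortcut doing CBC encryption would
carry an nHWA_CTRL value with the CBC bit set, the CTR bit clear and DIRECTION
set to encrypt, per the bit layout described above:

	u32 nHWA_CTRL = (1 << 5)    /* CBC mode (CTR, bit 6, left at 0) */
	              | (1 << 2);   /* DIRECTION = 1: encryption */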
+
+struct RPC_LOCK_HWA_SUSPEND_SHORTCUT_OUT {
+ union PUBLIC_CRYPTO_OPERATION_STATE sOperationState;
+};
+
+struct RPC_LOCK_HWA_SUSPEND_SHORTCUT_IN {
+ u32 nShortcutID;
+};
+
+struct RPC_RESUME_SHORTCUT_UNLOCK_HWA_IN {
+ u32 nShortcutID;
+ u32 hAES1KeyContext;
+ u32 hAES2KeyContext;
+ u32 hDESKeyContext;
+ union PUBLIC_CRYPTO_OPERATION_STATE sOperationState;
+};
+
+/*------------------------------------------------------------------------- */
+/*
+ * Lock or unlock the public HWA specified by nHWAID
+ */
+void PDrvCryptoLockUnlockHWA(u32 nHWAID, bool bDoLock)
+{
+ int is_sem = 0;
+ struct semaphore *s = NULL;
+ struct mutex *m = NULL;
+ struct SCXLNX_DEVICE *dev = SCXLNXGetDevice();
+
+ dprintk(KERN_INFO "PDrvCryptoLockUnlockHWA:nHWAID=0x%04X bDoLock=%d\n",
+ nHWAID, bDoLock);
+
+ switch (nHWAID) {
+ case RPC_AES1_CODE:
+ s = &dev->sAES1CriticalSection;
+ is_sem = 1;
+ break;
+ case RPC_AES2_CODE:
+ s = &dev->sAES2CriticalSection;
+ is_sem = 1;
+ break;
+ case RPC_DES_CODE:
+ m = &dev->sDESCriticalSection;
+ break;
+ default:
+ case RPC_SHA_CODE:
+ m = &dev->sSHACriticalSection;
+ break;
+ }
+
+ if (bDoLock == LOCK_HWA) {
+ dprintk(KERN_INFO "PDrvCryptoLockUnlockHWA: "
+ "Wait for HWAID=0x%04X\n", nHWAID);
+ if (is_sem) {
+ while (down_trylock(s))
+ cpu_relax();
+ } else {
+ while (!mutex_trylock(m))
+ cpu_relax();
+ }
+ dprintk(KERN_INFO "PDrvCryptoLockUnlockHWA: "
+ "Locked on HWAID=0x%04X\n", nHWAID);
+ } else {
+ if (is_sem)
+ up(s);
+ else
+ mutex_unlock(m);
+ dprintk(KERN_INFO "PDrvCryptoLockUnlockHWA: "
+ "Released for HWAID=0x%04X\n", nHWAID);
+ }
+}
+
+/*------------------------------------------------------------------------- */
+/**
+ *Initialize the public crypto DMA channels, global HWA semaphores and handles
+ */
+u32 SCXPublicCryptoInit(void)
+{
+ struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
+ u32 nError = PUBLIC_CRYPTO_OPERATION_SUCCESS;
+
+ /* Initialize HWAs */
+ PDrvCryptoAESInit();
+ PDrvCryptoDigestInit();
+
+ /*initialize the HWA semaphores */
+ sema_init(&pDevice->sAES1CriticalSection, 1);
+ sema_init(&pDevice->sAES2CriticalSection, 1);
+ mutex_init(&pDevice->sSHACriticalSection);
+
+ /*initialize the current key handle loaded in the AESn/DES HWA */
+ pDevice->hAES1SecureKeyContext = 0;
+ pDevice->hAES2SecureKeyContext = 0;
+ pDevice->bSHAM1IsPublic = false;
+
+ /*initialize the DMA semaphores */
+ mutex_init(&pDevice->sm.sDMALock);
+
+ /*allocate DMA buffer */
+ pDevice->nDMABufferLength = PAGE_SIZE * 16;
+ pDevice->pDMABuffer = dma_alloc_coherent(NULL,
+ pDevice->nDMABufferLength,
+ &(pDevice->pDMABufferPhys),
+ GFP_KERNEL);
+ if (pDevice->pDMABuffer == NULL) {
+ printk(KERN_ERR
+ "SCXPublicCryptoInit: Out of memory for DMA buffer\n");
+ nError = S_ERROR_OUT_OF_MEMORY;
+ }
+
+ return nError;
+}
+
+/*------------------------------------------------------------------------- */
+/*
+ *Initialize the device context CUS fields (shortcut semaphore and public CUS
+ *list)
+ */
+void SCXPublicCryptoInitDeviceContext(struct SCXLNX_CONNECTION *pDeviceContext)
+{
+ /*initialize the CUS list in the given device context */
+ spin_lock_init(&(pDeviceContext->shortcutListCriticalSectionLock));
+ INIT_LIST_HEAD(&(pDeviceContext->ShortcutList));
+}
+
+/*------------------------------------------------------------------------- */
+/**
+ *Terminate the public crypto (including DMA)
+ */
+void SCXPublicCryptoTerminate(void)
+{
+ struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
+
+ if (pDevice->pDMABuffer != NULL) {
+ dma_free_coherent(NULL, pDevice->nDMABufferLength,
+ pDevice->pDMABuffer,
+ pDevice->pDMABufferPhys);
+ pDevice->pDMABuffer = NULL;
+ }
+
+ PDrvCryptoDigestExit();
+ PDrvCryptoAESExit();
+}
+
+/*------------------------------------------------------------------------- */
+
+void SCXPublicCryptoWaitForReadyBitInfinitely(u32 *pRegister, u32 vBit)
+{
+ while (!(INREG32(pRegister) & vBit))
+ ;
+}
+
+/*------------------------------------------------------------------------- */
+
+u32 SCXPublicCryptoWaitForReadyBit(u32 *pRegister, u32 vBit)
+{
+ u32 timeoutCounter = PUBLIC_CRYPTO_TIMEOUT_CONST;
+
+ while ((!(INREG32(pRegister) & vBit)) && ((--timeoutCounter) != 0))
+ ;
+
+ if (timeoutCounter == 0)
+ return PUBLIC_CRYPTO_ERR_TIMEOUT;
+
+ return PUBLIC_CRYPTO_OPERATION_SUCCESS;
+}
+
+/*------------------------------------------------------------------------- */
+
+static DEFINE_SPINLOCK(clk_lock);
+
+void SCXPublicCryptoDisableClock(uint32_t vClockPhysAddr)
+{
+ u32 *pClockReg;
+ u32 val;
+ unsigned long flags;
+
+ dprintk(KERN_INFO "SCXPublicCryptoDisableClock: " \
+ "vClockPhysAddr=0x%08X\n",
+ vClockPhysAddr);
+
+ /* Ensure no concurrent access while changing clock registers */
+ spin_lock_irqsave(&clk_lock, flags);
+
+ pClockReg = (u32 *)IO_ADDRESS(vClockPhysAddr);
+
+ val = __raw_readl(pClockReg);
+ val &= ~(0x3);
+ __raw_writel(val, pClockReg);
+
+ /* Wait for clock to be fully disabled */
+ while ((__raw_readl(pClockReg) & 0x30000) == 0)
+ ;
+
+ spin_unlock_irqrestore(&clk_lock, flags);
+
+ tf_l4sec_clkdm_allow_idle(false, true);
+}
+
+/*------------------------------------------------------------------------- */
+
+void SCXPublicCryptoEnableClock(uint32_t vClockPhysAddr)
+{
+ u32 *pClockReg;
+ u32 val;
+ unsigned long flags;
+
+ dprintk(KERN_INFO "SCXPublicCryptoEnableClock: " \
+ "vClockPhysAddr=0x%08X\n",
+ vClockPhysAddr);
+
+ tf_l4sec_clkdm_wakeup(false, true);
+
+ /* Ensure no concurrent access while changing clock registers */
+ spin_lock_irqsave(&clk_lock, flags);
+
+ pClockReg = (u32 *)IO_ADDRESS(vClockPhysAddr);
+
+ val = __raw_readl(pClockReg);
+ val |= 0x2;
+ __raw_writel(val, pClockReg);
+
+ /* Wait for clock to be fully enabled */
+ while ((__raw_readl(pClockReg) & 0x30000) != 0)
+ ;
+
+ spin_unlock_irqrestore(&clk_lock, flags);
+}
+
diff --git a/security/smc/omap4/scx_public_crypto.h b/security/smc/omap4/scx_public_crypto.h
new file mode 100644
index 0000000..984cb18
--- /dev/null
+++ b/security/smc/omap4/scx_public_crypto.h
@@ -0,0 +1,348 @@
+/*
+ * Copyright (c) 2006-2008 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __SCX_PUBLIC_CRYPTO_H
+#define __SCX_PUBLIC_CRYPTO_H
+
+#include "scxlnx_defs.h"
+#include <linux/io.h>
+#include <mach/io.h>
+
+#include <clockdomain.h>
+
+/*-------------------------------------------------------------------------- */
+
+#define PUBLIC_CRYPTO_HWA_AES1 0x1
+#define PUBLIC_CRYPTO_HWA_AES2 0x2
+#define PUBLIC_CRYPTO_HWA_DES 0x4
+#define PUBLIC_CRYPTO_HWA_SHA 0x8
+
+#define OUTREG32(a, b) __raw_writel(b, a)
+#define INREG32(a) __raw_readl(a)
+#define SETREG32(x, y) OUTREG32(x, INREG32(x) | (y))
+#define CLRREG32(x, y) OUTREG32(x, INREG32(x) & ~(y))
+
+#define PUBLIC_CRYPTO_CLKSTCTRL_CLOCK_REG 0x4A009580
+#define PUBLIC_CRYPTO_AES1_CLOCK_REG 0x4A0095A0
+#define PUBLIC_CRYPTO_AES2_CLOCK_REG 0x4A0095A8
+#define PUBLIC_CRYPTO_DES3DES_CLOCK_REG 0x4A0095B0
+#define PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG 0x4A0095C8
+
+#define BYTES_TO_LONG(a)(u32)(a[0] | (a[1]<<8) | (a[2]<<16) | (a[3]<<24))
+#define LONG_TO_BYTE(a, b) { a[0] = (u8)((b) & 0xFF); \
+ a[1] = (u8)(((b) >> 8) & 0xFF); \
+ a[2] = (u8)(((b) >> 16) & 0xFF); \
+ a[3] = (u8)(((b) >> 24) & 0xFF); }
+
+#define IS_4_BYTES_ALIGNED(x)((!((x) & 0x3)) ? true : false)
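A quick illustration (editor's sketch) of the little-endian byte/word helpers
above:

	u8 bytes[4] = { 0x78, 0x56, 0x34, 0x12 };
	u32 word = BYTES_TO_LONG(bytes);   /* 0x12345678 */

	u8 out[4];
	LONG_TO_BYTE(out, word);           /* out == { 0x78, 0x56, 0x34, 0x12 } */
	/* IS_4_BYTES_ALIGNED(word) checks the low two bits of a length/address. */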
+
+#define TF_SMC_OMAP4_PUBLIC_DMA
+
+/*
+ *The size limit to trigger DMA for AES, DES and Digest.
+ *0xFFFFFFFF means "never"
+ */
+#ifdef TF_SMC_OMAP4_PUBLIC_DMA
+#define DMA_TRIGGER_IRQ_AES 128
+#define DMA_TRIGGER_IRQ_DES 128
+#define DMA_TRIGGER_IRQ_DIGEST 1024
+#else
+#define DMA_TRIGGER_IRQ_AES 0xFFFFFFFF
+#define DMA_TRIGGER_IRQ_DES 0xFFFFFFFF
+#define DMA_TRIGGER_IRQ_DIGEST 0xFFFFFFFF
+#endif
+
+/*Error code constants */
+#define PUBLIC_CRYPTO_OPERATION_SUCCESS 0x00000000
+#define PUBLIC_CRYPTO_ERR_ACCESS_DENIED 0x00000001
+#define PUBLIC_CRYPTO_ERR_OUT_OF_MEMORY 0x00000002
+#define PUBLIC_CRYPTO_ERR_BAD_PARAMETERS 0x00000003
+#define PUBLIC_CRYPTO_ERR_TIMEOUT 0x00000004
+
+/*DMA mode constants */
+#define PUBLIC_CRYPTO_DMA_USE_NONE 0x00000000 /*No DMA used*/
+/*DMA with active polling used */
+#define PUBLIC_CRYPTO_DMA_USE_POLLING 0x00000001
+#define PUBLIC_CRYPTO_DMA_USE_IRQ 0x00000002 /*DMA with IRQ used*/
+
+#define PUBLIC_CRYPTO_REG_SET_BIT(x, y) OUTREG32(x, INREG32(x) | y);
+#define PUBLIC_CRYPTO_REG_UNSET_BIT(x, y) OUTREG32(x, INREG32(x) & (~y));
+
+#define AES_BLOCK_SIZE 16
+#define DES_BLOCK_SIZE 8
+#define HASH_BLOCK_SIZE 64
+
+#define HASH_MD5_LENGTH 16
+#define HASH_SHA1_LENGTH 20
+#define HASH_SHA224_LENGTH 28
+#define HASH_SHA256_LENGTH 32
+
+#define PUBLIC_CRYPTO_DIGEST_MAX_SIZE 32
+#define PUBLIC_CRYPTO_IV_MAX_SIZE 16
+
+#define PUBLIC_CRYPTO_HW_CLOCK_ADDR (0x48004A14)
+#define PUBLIC_CRYPTO_HW_AUTOIDLE_ADDR (0x48004A34)
+
+#define PUBLIC_CRYPTO_HW_CLOCK1_ADDR (0x48004A10)
+#define PUBLIC_CRYPTO_HW_AUTOIDLE1_ADDR (0x48004A30)
+
+#define DIGEST_CTRL_ALGO_MD5 0
+#define DIGEST_CTRL_ALGO_SHA1 1
+#define DIGEST_CTRL_ALGO_SHA224 2
+#define DIGEST_CTRL_ALGO_SHA256 3
+
+/*-------------------------------------------------------------------------- */
+/*
+ *The magic word.
+ */
+#define CRYPTOKI_UPDATE_SHORTCUT_CONTEXT_MAGIC 0x45EF683C
+
+/*-------------------------------------------------------------------------- */
+/* CUS context structure */
+/*-------------------------------------------------------------------------- */
+
+/* State of an AES operation */
+struct PUBLIC_CRYPTO_AES_OPERATION_STATE {
+ u32 AES_IV_0;
+ u32 AES_IV_1;
+ u32 AES_IV_2;
+ u32 AES_IV_3;
+
+ u32 CTRL;
+
+ /* Only used by Linux crypto API interface */
+ u32 KEY1_0;
+ u32 KEY1_1;
+ u32 KEY1_2;
+ u32 KEY1_3;
+ u32 KEY1_4;
+ u32 KEY1_5;
+ u32 KEY1_6;
+ u32 KEY1_7;
+
+ u32 key_is_public;
+};
+
+struct PUBLIC_CRYPTO_DES_OPERATION_STATE {
+ u32 DES_IV_L;
+ u32 DES_IV_H;
+};
+
+#define HASH_BLOCK_BYTES_LENGTH 64
+
+struct PUBLIC_CRYPTO_SHA_OPERATION_STATE {
+ /* Current digest */
+ u32 SHA_DIGEST_A;
+ u32 SHA_DIGEST_B;
+ u32 SHA_DIGEST_C;
+ u32 SHA_DIGEST_D;
+ u32 SHA_DIGEST_E;
+ u32 SHA_DIGEST_F;
+ u32 SHA_DIGEST_G;
+ u32 SHA_DIGEST_H;
+
+ /* This buffer contains a partial chunk */
+ u8 pChunkBuffer[HASH_BLOCK_BYTES_LENGTH];
+
+ /* Number of bytes stored in pChunkBuffer (0..64) */
+ u32 nChunkLength;
+
+ /*
+ * Total number of bytes processed so far
+ * (not including the partial chunk)
+ */
+ u32 nBytesProcessed;
+
+ u32 CTRL;
+};
+
+union PUBLIC_CRYPTO_OPERATION_STATE {
+ struct PUBLIC_CRYPTO_AES_OPERATION_STATE aes;
+ struct PUBLIC_CRYPTO_DES_OPERATION_STATE des;
+ struct PUBLIC_CRYPTO_SHA_OPERATION_STATE sha;
+};
+
+/*
+ *Fully describes a public crypto operation
+ *(i.e., an operation that has a shortcut attached).
+ */
+struct CRYPTOKI_UPDATE_SHORTCUT_CONTEXT {
+ /*
+ *Identifies the public crypto operation in the list of all public
+ *operations.
+ */
+ struct list_head list;
+
+ u32 nMagicNumber; /*Must be set to
+ *{CRYPTOKI_UPDATE_SHORTCUT_CONTEXT_MAGIC} */
+
+ /*basic fields */
+ u32 hClientSession;
+ u32 nCommandID;
+ u32 nHWAID;
+ u32 nHWA_CTRL;
+ u32 hKeyContext;
+ union PUBLIC_CRYPTO_OPERATION_STATE sOperationState;
+ u32 nUseCount;
+ bool bSuspended;
+};
+
+struct CRYPTOKI_UPDATE_PARAMS {
+ /*fields for data processing of an update command */
+ u32 nInputDataLength;
+ u8 *pInputData;
+ struct SCXLNX_SHMEM_DESC *pInputShmem;
+
+ u32 nResultDataLength;
+ u8 *pResultData;
+ struct SCXLNX_SHMEM_DESC *pOutputShmem;
+
+ u8 *pS2CDataBuffer;
+ u32 nS2CDataBufferMaxLength;
+};
+
+/*-------------------------------------------------------------------------- */
+/*
+ *Public crypto API (Top level)
+ */
+
+/*
+ *Initialize the public crypto DMA channels and global HWA semaphores
+ */
+u32 SCXPublicCryptoInit(void);
+
+/*
+ *Initialize the device context CUS fields
+ *(shortcut semaphore and public CUS list)
+ */
+void SCXPublicCryptoInitDeviceContext(struct SCXLNX_CONNECTION *pDeviceContext);
+
+/**
+ *Terminate the public crypto (including DMA)
+ */
+void SCXPublicCryptoTerminate(void);
+
+int SCXPublicCryptoTryShortcutedUpdate(struct SCXLNX_CONNECTION *pConn,
+ struct SCX_COMMAND_INVOKE_CLIENT_COMMAND *pMessage,
+ struct SCX_ANSWER_INVOKE_CLIENT_COMMAND *pAnswer);
+
+int SCXPublicCryptoExecuteRPCCommand(u32 nRPCCommand, void *pRPCSharedBuffer);
+
+/*-------------------------------------------------------------------------- */
+/*
+ *Helper methods
+ */
+u32 SCXPublicCryptoWaitForReadyBit(u32 *pRegister, u32 vBit);
+void SCXPublicCryptoWaitForReadyBitInfinitely(u32 *pRegister, u32 vBit);
+
+void SCXPublicCryptoEnableClock(uint32_t vClockPhysAddr);
+void SCXPublicCryptoDisableClock(uint32_t vClockPhysAddr);
+
+#define LOCK_HWA true
+#define UNLOCK_HWA false
+
+void PDrvCryptoLockUnlockHWA(u32 nHWAID, bool bDoLock);
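A short usage sketch (editorial, not part of the patch) for the HWA locking
helper declared above, bracketing an operation on the AES1 accelerator:

	PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_AES1, LOCK_HWA);
	/* ... drive the AES1 accelerator here ... */
	PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_AES1, UNLOCK_HWA);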
+
+/*---------------------------------------------------------------------------*/
+/* AES operations */
+/*---------------------------------------------------------------------------*/
+
+void PDrvCryptoAESInit(void);
+void PDrvCryptoAESExit(void);
+
+#ifdef CONFIG_SMC_KERNEL_CRYPTO
+int register_smc_public_crypto_aes(void);
+void unregister_smc_public_crypto_aes(void);
+#else
+static inline int register_smc_public_crypto_aes(void)
+{
+ return 0;
+}
+
+static inline void unregister_smc_public_crypto_aes(void) {}
+#endif
+
+/**
+ *This function performs an AES update operation.
+ *
+ *The AES1 accelerator is assumed loaded with the correct key
+ *
+ *AES_CTRL: defines the mode and direction
+ *pAESState: defines the operation IV
+ *pSrc: Input buffer to process.
+ *pDest: Output buffer containing the processed data.
+ *
+ *nbBlocks: number of block(s) to process.
+ */
+bool PDrvCryptoUpdateAES(struct PUBLIC_CRYPTO_AES_OPERATION_STATE *pAESState,
+ u8 *pSrc, u8 *pDest, u32 nbBlocks);
+
+/*---------------------------------------------------------------------------*/
+/* DES/DES3 operations */
+/*---------------------------------------------------------------------------*/
+
+void PDrvCryptoDESInit(void);
+void PDrvCryptoDESExit(void);
+
+/**
+ *This function performs a DES update operation.
+ *
+ *The DES accelerator is assumed loaded with the correct key
+ *
+ *DES_CTRL: defines the mode and direction
+ *pDESState: defines the operation IV
+ *pSrc: Input buffer to process.
+ *pDest: Output buffer containing the processed data.
+ *nbBlocks: Number of block(s) to process.
+ */
+bool PDrvCryptoUpdateDES(u32 DES_CTRL,
+ struct PUBLIC_CRYPTO_DES_OPERATION_STATE *pDESState,
+ u8 *pSrc, u8 *pDest, u32 nbBlocks);
+
+/*---------------------------------------------------------------------------*/
+/* Digest operations */
+/*---------------------------------------------------------------------------*/
+
+void PDrvCryptoDigestInit(void);
+void PDrvCryptoDigestExit(void);
+
+#ifdef CONFIG_SMC_KERNEL_CRYPTO
+int register_smc_public_crypto_digest(void);
+void unregister_smc_public_crypto_digest(void);
+#else
+static inline int register_smc_public_crypto_digest(void)
+{
+ return 0;
+}
+
+static inline void unregister_smc_public_crypto_digest(void) {}
+#endif
+
+/**
+ *This function performs a HASH update Operation.
+ *
+ *SHA_CTRL: defines the algorithm
+ *pSHAState: State of the operation
+ *pData: Input buffer to process
+ *dataLength: Length in bytes of the input buffer.
+ */
+void PDrvCryptoUpdateHash(
+ struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *pSHAState,
+ u8 *pData, u32 dataLength);
+
+#endif /*__SCX_PUBLIC_CRYPTO_H */
diff --git a/security/smc/omap4/scx_public_crypto_AES.c b/security/smc/omap4/scx_public_crypto_AES.c
new file mode 100644
index 0000000..96b065f
--- /dev/null
+++ b/security/smc/omap4/scx_public_crypto_AES.c
@@ -0,0 +1,1180 @@
+/*
+ * Copyright (c) 2006-2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "scxlnx_defs.h"
+#include "scxlnx_util.h"
+#include "scx_public_crypto.h"
+#include "scx_public_dma.h"
+#include "scxlnx_mshield.h"
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <crypto/algapi.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/aes.h>
+#include <mach/io.h>
+
+/*
+ *AES Hardware Accelerator: Base address
+ */
+#define AES1_REGS_HW_ADDR 0x4B501000
+#define AES2_REGS_HW_ADDR 0x4B701000
+
+/*
+ *CTRL register Masks
+ */
+#define AES_CTRL_OUTPUT_READY_BIT (1<<0)
+#define AES_CTRL_INPUT_READY_BIT (1<<1)
+
+#define AES_CTRL_GET_DIRECTION(x) (x&4)
+#define AES_CTRL_DIRECTION_DECRYPT 0
+#define AES_CTRL_DIRECTION_ENCRYPT (1<<2)
+
+#define AES_CTRL_GET_KEY_SIZE(x) (x & 0x18)
+#define AES_CTRL_KEY_SIZE_128 0x08
+#define AES_CTRL_KEY_SIZE_192 0x10
+#define AES_CTRL_KEY_SIZE_256 0x18
+
+#define AES_CTRL_GET_MODE(x) ((x & 0x60) >> 5)
+#define AES_CTRL_IS_MODE_CBC(x) (AES_CTRL_GET_MODE(x) == 1)
+#define AES_CTRL_IS_MODE_ECB(x) (AES_CTRL_GET_MODE(x) == 0)
+#define AES_CTRL_IS_MODE_CTR(x) ((AES_CTRL_GET_MODE(x) == 2) || \
+ (AES_CTRL_GET_MODE(x) == 3))
+#define AES_CTRL_MODE_CBC_BIT 0x20
+#define AES_CTRL_MODE_ECB_BIT 0
+#define AES_CTRL_MODE_CTR_BIT 0x40
+
+#define AES_CTRL_GET_CTR_WIDTH(x) (x&0x180)
+#define AES_CTRL_CTR_WIDTH_32 0
+#define AES_CTRL_CTR_WIDTH_64 0x80
+#define AES_CTRL_CTR_WIDTH_96 0x100
+#define AES_CTRL_CTR_WIDTH_128 0x180
+
+/*
+ * SYSCONFIG register masks
+ */
+#define AES_SYSCONFIG_DMA_REQ_IN_EN_BIT (1 << 5)
+#define AES_SYSCONFIG_DMA_REQ_OUT_EN_BIT (1 << 6)
+
+
+/*----------------------------------------------------------------------*/
+/* AES Context */
+/*----------------------------------------------------------------------*/
+/**
+ *This structure contains the registers of the AES HW accelerator.
+ */
+struct AESReg_t {
+ u32 AES_KEY2_6; /* 0x00 */
+ u32 AES_KEY2_7; /* 0x04 */
+ u32 AES_KEY2_4; /* 0x08 */
+ u32 AES_KEY2_5; /* 0x0C */
+ u32 AES_KEY2_2; /* 0x10 */
+ u32 AES_KEY2_3; /* 0x14 */
+ u32 AES_KEY2_0; /* 0x18 */
+ u32 AES_KEY2_1; /* 0x1C */
+
+ u32 AES_KEY1_6; /* 0x20 */
+ u32 AES_KEY1_7; /* 0x24 */
+ u32 AES_KEY1_4; /* 0x28 */
+ u32 AES_KEY1_5; /* 0x2C */
+ u32 AES_KEY1_2; /* 0x30 */
+ u32 AES_KEY1_3; /* 0x34 */
+ u32 AES_KEY1_0; /* 0x38 */
+ u32 AES_KEY1_1; /* 0x3C */
+
+ u32 AES_IV_IN_0; /* 0x40 */
+ u32 AES_IV_IN_1; /* 0x44 */
+ u32 AES_IV_IN_2; /* 0x48 */
+ u32 AES_IV_IN_3; /* 0x4C */
+
+ u32 AES_CTRL; /* 0x50 */
+
+ u32 AES_C_LENGTH_0; /* 0x54 */
+ u32 AES_C_LENGTH_1; /* 0x58 */
+ u32 AES_AUTH_LENGTH; /* 0x5C */
+
+ u32 AES_DATA_IN_0; /* 0x60 */
+ u32 AES_DATA_IN_1; /* 0x64 */
+ u32 AES_DATA_IN_2; /* 0x68 */
+ u32 AES_DATA_IN_3; /* 0x6C */
+
+ u32 AES_TAG_OUT_0; /* 0x70 */
+ u32 AES_TAG_OUT_1; /* 0x74 */
+ u32 AES_TAG_OUT_2; /* 0x78 */
+ u32 AES_TAG_OUT_3; /* 0x7C */
+
+ u32 AES_REVISION; /* 0x80 */
+ u32 AES_SYSCONFIG; /* 0x84 */
+
+ u32 AES_SYSSTATUS; /* 0x88 */
+
+};
+static struct AESReg_t *pAESReg_t;
+
+#ifdef CONFIG_SMC_KERNEL_CRYPTO
+#define FLAGS_FAST BIT(7)
+#define FLAGS_BUSY 8
+
+struct aes_hwa_ctx {
+ unsigned long flags;
+
+ spinlock_t lock;
+ struct crypto_queue queue;
+
+ struct tasklet_struct task;
+
+ struct ablkcipher_request *req;
+ size_t total;
+ struct scatterlist *in_sg;
+ size_t in_offset;
+ struct scatterlist *out_sg;
+ size_t out_offset;
+
+ size_t buflen;
+ void *buf_in;
+ size_t dma_size;
+ int dma_in;
+ int dma_lch_in;
+ dma_addr_t dma_addr_in;
+ void *buf_out;
+ int dma_out;
+ int dma_lch_out;
+ dma_addr_t dma_addr_out;
+
+ struct PUBLIC_CRYPTO_AES_OPERATION_STATE *ctx;
+};
+static struct aes_hwa_ctx *aes_ctx;
+#endif
+
+/*---------------------------------------------------------------------------
+ *Forward declarations
+ *------------------------------------------------------------------------- */
+
+static void PDrvCryptoUpdateAESWithDMA(u8 *pSrc, u8 *pDest,
+ u32 nbBlocks);
+
+/*----------------------------------------------------------------------------
+ *Save HWA registers into the specified operation state structure
+ *--------------------------------------------------------------------------*/
+static void PDrvCryptoSaveAESRegisters(
+ struct PUBLIC_CRYPTO_AES_OPERATION_STATE *pAESState)
+{
+ dprintk(KERN_INFO "PDrvCryptoSaveAESRegisters: \
+ pAESState(%p) <- pAESReg_t(%p): CTRL=0x%08x\n",
+ pAESState, pAESReg_t, pAESState->CTRL);
+
+ /*Save the IV if we are in CBC or CTR mode (not required for ECB) */
+ if (!AES_CTRL_IS_MODE_ECB(pAESState->CTRL)) {
+ pAESState->AES_IV_0 = INREG32(&pAESReg_t->AES_IV_IN_0);
+ pAESState->AES_IV_1 = INREG32(&pAESReg_t->AES_IV_IN_1);
+ pAESState->AES_IV_2 = INREG32(&pAESReg_t->AES_IV_IN_2);
+ pAESState->AES_IV_3 = INREG32(&pAESReg_t->AES_IV_IN_3);
+ }
+}
+
+/*----------------------------------------------------------------------------
+ *Restore the HWA registers from the operation state structure
+ *---------------------------------------------------------------------------*/
+static void PDrvCryptoRestoreAESRegisters(
+ struct PUBLIC_CRYPTO_AES_OPERATION_STATE *pAESState)
+{
+ struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
+
+ dprintk(KERN_INFO "PDrvCryptoRestoreAESRegisters: \
+ pAESReg_t(%p) <- pAESState(%p): CTRL=0x%08x\n",
+ pAESReg_t, pAESState, pAESState->CTRL);
+
+ if (pAESState->key_is_public) {
+ OUTREG32(&pAESReg_t->AES_KEY1_0, pAESState->KEY1_0);
+ OUTREG32(&pAESReg_t->AES_KEY1_1, pAESState->KEY1_1);
+ OUTREG32(&pAESReg_t->AES_KEY1_2, pAESState->KEY1_2);
+ OUTREG32(&pAESReg_t->AES_KEY1_3, pAESState->KEY1_3);
+ OUTREG32(&pAESReg_t->AES_KEY1_4, pAESState->KEY1_4);
+ OUTREG32(&pAESReg_t->AES_KEY1_5, pAESState->KEY1_5);
+ OUTREG32(&pAESReg_t->AES_KEY1_6, pAESState->KEY1_6);
+ OUTREG32(&pAESReg_t->AES_KEY1_7, pAESState->KEY1_7);
+
+ /*
+ * Make sure a potential secure key that has been overwritten by
+ * the previous code is reinstalled before performing other
+ * public crypto operations.
+ */
+ pDevice->hAES1SecureKeyContext = 0;
+ } else {
+ pAESState->CTRL |= INREG32(&pAESReg_t->AES_CTRL);
+ }
+
+ /*
+ * Restore the IV first if we are in CBC or CTR mode
+ * (not required for ECB)
+ */
+ if (!AES_CTRL_IS_MODE_ECB(pAESState->CTRL)) {
+ OUTREG32(&pAESReg_t->AES_IV_IN_0, pAESState->AES_IV_0);
+ OUTREG32(&pAESReg_t->AES_IV_IN_1, pAESState->AES_IV_1);
+ OUTREG32(&pAESReg_t->AES_IV_IN_2, pAESState->AES_IV_2);
+ OUTREG32(&pAESReg_t->AES_IV_IN_3, pAESState->AES_IV_3);
+ }
+
+ /* Then set the CTRL register:
+ * overwrite the CTRL only when needed, because doing it unconditionally
+ * breaks the HWA processing (observed experimentally)
+ */
+
+ pAESState->CTRL = (pAESState->CTRL & (3 << 3)) /* key size */
+ | (pAESState->CTRL & ((1 << 2) | (1 << 5) | (1 << 6)))
+ | (0x3 << 7) /* Always set CTR_WIDTH to 128-bit */;
+
+ if ((pAESState->CTRL & 0x1FC) !=
+ (INREG32(&pAESReg_t->AES_CTRL) & 0x1FC))
+ OUTREG32(&pAESReg_t->AES_CTRL, pAESState->CTRL & 0x1FC);
+
+ /* Set the SYSCONFIG register to 0 */
+ OUTREG32(&pAESReg_t->AES_SYSCONFIG, 0);
+}
+
+/*-------------------------------------------------------------------------- */
+
+void PDrvCryptoAESInit(void)
+{
+ pAESReg_t = omap_ioremap(AES1_REGS_HW_ADDR, SZ_1M, MT_DEVICE);
+ if (pAESReg_t == NULL)
+ panic("Unable to remap AES1 module");
+}
+
+void PDrvCryptoAESExit(void)
+{
+ omap_iounmap(pAESReg_t);
+}
+
+bool PDrvCryptoUpdateAES(struct PUBLIC_CRYPTO_AES_OPERATION_STATE *pAESState,
+ u8 *pSrc, u8 *pDest, u32 nbBlocks)
+{
+ u32 nbr_of_blocks;
+ u32 vTemp;
+ u8 *pProcessSrc = pSrc;
+ u8 *pProcessDest = pDest;
+ u32 dmaUse = PUBLIC_CRYPTO_DMA_USE_NONE;
+
+ /*
+ *Choice of the processing type
+ */
+ if (nbBlocks * AES_BLOCK_SIZE >= DMA_TRIGGER_IRQ_AES)
+ dmaUse = PUBLIC_CRYPTO_DMA_USE_IRQ;
+
+ dprintk(KERN_INFO "PDrvCryptoUpdateAES: \
+ pSrc=0x%08x, pDest=0x%08x, nbBlocks=0x%08x, dmaUse=0x%08x\n",
+ (unsigned int)pSrc,
+ (unsigned int)pDest,
+ (unsigned int)nbBlocks,
+ (unsigned int)dmaUse);
+
+ if (nbBlocks == 0) {
+ dprintk(KERN_INFO "PDrvCryptoUpdateAES: Nothing to process\n");
+ return true;
+ }
+
+ if ((AES_CTRL_GET_DIRECTION(INREG32(&pAESReg_t->AES_CTRL)) !=
+ AES_CTRL_GET_DIRECTION(pAESState->CTRL)) &&
+ !pAESState->key_is_public) {
+ dprintk(KERN_WARNING "HWA configured for another direction\n");
+ return false;
+ }
+
+ /*Restore the registers of the accelerator from the operation state */
+ PDrvCryptoRestoreAESRegisters(pAESState);
+
+ if (dmaUse == PUBLIC_CRYPTO_DMA_USE_IRQ) {
+ /* Perform the update with DMA */
+ PDrvCryptoUpdateAESWithDMA(pProcessSrc,
+ pProcessDest, nbBlocks);
+ } else {
+ for (nbr_of_blocks = 0;
+ nbr_of_blocks < nbBlocks; nbr_of_blocks++) {
+
+ /*We wait for the input ready */
+
+ /*Crash the system as this should never occur */
+ if (SCXPublicCryptoWaitForReadyBit(
+ (u32 *)&pAESReg_t->AES_CTRL,
+ AES_CTRL_INPUT_READY_BIT) !=
+ PUBLIC_CRYPTO_OPERATION_SUCCESS)
+ panic("Wait too long for AES hardware \
+ accelerator Input data to be ready\n");
+
+ /* We copy the 16 bytes of data src->reg */
+ vTemp = (u32) BYTES_TO_LONG(pProcessSrc);
+ OUTREG32(&pAESReg_t->AES_DATA_IN_0, vTemp);
+ pProcessSrc += 4;
+ vTemp = (u32) BYTES_TO_LONG(pProcessSrc);
+ OUTREG32(&pAESReg_t->AES_DATA_IN_1, vTemp);
+ pProcessSrc += 4;
+ vTemp = (u32) BYTES_TO_LONG(pProcessSrc);
+ OUTREG32(&pAESReg_t->AES_DATA_IN_2, vTemp);
+ pProcessSrc += 4;
+ vTemp = (u32) BYTES_TO_LONG(pProcessSrc);
+ OUTREG32(&pAESReg_t->AES_DATA_IN_3, vTemp);
+ pProcessSrc += 4;
+
+ /* We wait for the output ready */
+ SCXPublicCryptoWaitForReadyBitInfinitely(
+ (u32 *)&pAESReg_t->AES_CTRL,
+ AES_CTRL_OUTPUT_READY_BIT);
+
+ /* We copy the 16 bytes of data reg->dest */
+ vTemp = INREG32(&pAESReg_t->AES_DATA_IN_0);
+ LONG_TO_BYTE(pProcessDest, vTemp);
+ pProcessDest += 4;
+ vTemp = INREG32(&pAESReg_t->AES_DATA_IN_1);
+ LONG_TO_BYTE(pProcessDest, vTemp);
+ pProcessDest += 4;
+ vTemp = INREG32(&pAESReg_t->AES_DATA_IN_2);
+ LONG_TO_BYTE(pProcessDest, vTemp);
+ pProcessDest += 4;
+ vTemp = INREG32(&pAESReg_t->AES_DATA_IN_3);
+ LONG_TO_BYTE(pProcessDest, vTemp);
+ pProcessDest += 4;
+ }
+ }
+
+ /* Save the accelerator registers into the operation state */
+ PDrvCryptoSaveAESRegisters(pAESState);
+
+ dprintk(KERN_INFO "PDrvCryptoUpdateAES: Done\n");
+
+ return true;
+}
+
+/*-------------------------------------------------------------------------- */
+/*
+ *Static function: performs AES encryption/decryption using the DMA for the
+ *data transfer (the end of each DMA transfer is monitored by IRQ).
+ *
+ *inputs: pSrc : pointer to the input data to process
+ * nbBlocks : number of blocks to process
+ *output: pDest : pointer to the output data (can be equal to pSrc)
+ */
+static void PDrvCryptoUpdateAESWithDMA(u8 *pSrc, u8 *pDest, u32 nbBlocks)
+{
+ /*
+ *Note: The DMA only sees physical addresses !
+ */
+
+ int dma_ch0;
+ int dma_ch1;
+ struct omap_dma_channel_params ch0_parameters;
+ struct omap_dma_channel_params ch1_parameters;
+ u32 nLength = nbBlocks * AES_BLOCK_SIZE;
+ u32 nLengthLoop = 0;
+ u32 nbBlocksLoop = 0;
+ struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
+
+ dprintk(KERN_INFO
+ "PDrvCryptoUpdateAESWithDMA: In=0x%08x, Out=0x%08x, Len=%u\n",
+ (unsigned int)pSrc,
+ (unsigned int)pDest,
+ (unsigned int)nLength);
+
+ /*lock the DMA */
+ mutex_lock(&pDevice->sm.sDMALock);
+
+ if (scxPublicDMARequest(&dma_ch0) != PUBLIC_CRYPTO_OPERATION_SUCCESS) {
+ mutex_unlock(&pDevice->sm.sDMALock);
+ return;
+ }
+ if (scxPublicDMARequest(&dma_ch1) != PUBLIC_CRYPTO_OPERATION_SUCCESS) {
+ scxPublicDMARelease(dma_ch0);
+ mutex_unlock(&pDevice->sm.sDMALock);
+ return;
+ }
+
+ while (nLength > 0) {
+
+ /*
+ * At this time, we are sure that the DMA channels
+ * are available and not used by any other public crypto operation
+ */
+
+ /*DMA used for Input and Output */
+ OUTREG32(&pAESReg_t->AES_SYSCONFIG,
+ INREG32(&pAESReg_t->AES_SYSCONFIG)
+ | AES_SYSCONFIG_DMA_REQ_OUT_EN_BIT
+ | AES_SYSCONFIG_DMA_REQ_IN_EN_BIT);
+
+ /*check length */
+ if (nLength <= pDevice->nDMABufferLength)
+ nLengthLoop = nLength;
+ else
+ nLengthLoop = pDevice->nDMABufferLength;
+
+ /*The length is always a multiple of the block size */
+ nbBlocksLoop = nLengthLoop / AES_BLOCK_SIZE;
+
+ /*
+ *Copy the data from the input buffer into a preallocated
+ *buffer which is aligned on the beginning of a page.
+ *This may prevent potential issues when flushing/invalidating
+ *the buffer as the cache lines are 64 bytes long.
+ */
+ memcpy(pDevice->pDMABuffer, pSrc, nLengthLoop);
+
+ /*DMA1: Mem -> AES */
+ scxPublicSetDMAChannelCommonParams(&ch0_parameters,
+ nbBlocksLoop,
+ DMA_CEN_Elts_per_Frame_AES,
+ AES1_REGS_HW_ADDR + 0x60,
+ (u32)pDevice->pDMABufferPhys,
+ OMAP44XX_DMA_AES1_P_DATA_IN_REQ);
+
+ ch0_parameters.src_amode = OMAP_DMA_AMODE_POST_INC;
+ ch0_parameters.dst_amode = OMAP_DMA_AMODE_CONSTANT;
+ ch0_parameters.src_or_dst_synch = OMAP_DMA_DST_SYNC;
+
+		dprintk(KERN_INFO "PDrvCryptoUpdateAESWithDMA: "
+			"scxPublicDMASetParams(ch0)\n");
+ scxPublicDMASetParams(dma_ch0, &ch0_parameters);
+
+ omap_set_dma_src_burst_mode(dma_ch0, OMAP_DMA_DATA_BURST_16);
+ omap_set_dma_dest_burst_mode(dma_ch0, OMAP_DMA_DATA_BURST_16);
+
+ /*DMA2: AES -> Mem */
+ scxPublicSetDMAChannelCommonParams(&ch1_parameters,
+ nbBlocksLoop,
+ DMA_CEN_Elts_per_Frame_AES,
+ (u32)pDevice->pDMABufferPhys,
+ AES1_REGS_HW_ADDR + 0x60,
+ OMAP44XX_DMA_AES1_P_DATA_OUT_REQ);
+
+ ch1_parameters.src_amode = OMAP_DMA_AMODE_CONSTANT;
+ ch1_parameters.dst_amode = OMAP_DMA_AMODE_POST_INC;
+ ch1_parameters.src_or_dst_synch = OMAP_DMA_SRC_SYNC;
+
+		dprintk(KERN_INFO "PDrvCryptoUpdateAESWithDMA: "
+			"scxPublicDMASetParams(ch1)\n");
+ scxPublicDMASetParams(dma_ch1, &ch1_parameters);
+
+ omap_set_dma_src_burst_mode(dma_ch1, OMAP_DMA_DATA_BURST_16);
+ omap_set_dma_dest_burst_mode(dma_ch1, OMAP_DMA_DATA_BURST_16);
+
+ wmb();
+
+ dprintk(KERN_INFO
+ "PDrvCryptoUpdateAESWithDMA: Start DMA channel %d\n",
+ (unsigned int)dma_ch1);
+ scxPublicDMAStart(dma_ch1, OMAP_DMA_BLOCK_IRQ);
+ dprintk(KERN_INFO
+ "PDrvCryptoUpdateAESWithDMA: Start DMA channel %d\n",
+ (unsigned int)dma_ch0);
+ scxPublicDMAStart(dma_ch0, OMAP_DMA_BLOCK_IRQ);
+
+ dprintk(KERN_INFO
+ "PDrvCryptoUpdateAESWithDMA: Waiting for IRQ\n");
+ scxPublicDMAWait(2);
+
+ /*Unset DMA synchronisation requests */
+ OUTREG32(&pAESReg_t->AES_SYSCONFIG,
+ INREG32(&pAESReg_t->AES_SYSCONFIG)
+ & (~AES_SYSCONFIG_DMA_REQ_OUT_EN_BIT)
+ & (~AES_SYSCONFIG_DMA_REQ_IN_EN_BIT));
+
+ scxPublicDMAClearChannel(dma_ch0);
+ scxPublicDMAClearChannel(dma_ch1);
+
+ /*
+ *The dma transfer is complete
+ */
+
+ /*The DMA output is in the preallocated aligned buffer
+ *and needs to be copied to the output buffer.*/
+ memcpy(pDest, pDevice->pDMABuffer, nLengthLoop);
+
+ pSrc += nLengthLoop;
+ pDest += nLengthLoop;
+ nLength -= nLengthLoop;
+ }
+
+ /*For safety reasons, let's clean the working buffer */
+ memset(pDevice->pDMABuffer, 0, nLengthLoop);
+
+ /*release the DMA */
+ scxPublicDMARelease(dma_ch0);
+ scxPublicDMARelease(dma_ch1);
+
+ mutex_unlock(&pDevice->sm.sDMALock);
+
+ dprintk(KERN_INFO "PDrvCryptoUpdateAESWithDMA: Success\n");
+}
+
+#ifdef CONFIG_SMC_KERNEL_CRYPTO
+/*
+ * AES HWA registration into kernel crypto framework
+ */
+
+static void sg_copy_buf(void *buf, struct scatterlist *sg,
+ unsigned int start, unsigned int nbytes, int out)
+{
+ struct scatter_walk walk;
+
+ if (!nbytes)
+ return;
+
+ scatterwalk_start(&walk, sg);
+ scatterwalk_advance(&walk, start);
+ scatterwalk_copychunks(buf, &walk, nbytes, out);
+ scatterwalk_done(&walk, out, 0);
+}
+
+static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
+ size_t buflen, size_t total, int out)
+{
+ unsigned int count, off = 0;
+
+ while (buflen && total) {
+ count = min((*sg)->length - *offset, total);
+ count = min(count, buflen);
+
+ if (!count)
+ return off;
+
+ sg_copy_buf(buf + off, *sg, *offset, count, out);
+
+ off += count;
+ buflen -= count;
+ *offset += count;
+ total -= count;
+
+ if (*offset == (*sg)->length) {
+ *sg = sg_next(*sg);
+ if (*sg)
+ *offset = 0;
+ else
+ total = 0;
+ }
+ }
+
+ return off;
+}
+
+static int aes_dma_start(struct aes_hwa_ctx *ctx)
+{
+ int err, fast = 0, in, out;
+ size_t count;
+ dma_addr_t addr_in, addr_out;
+ struct omap_dma_channel_params dma_params;
+ struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state =
+ crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(ctx->req));
+
+ if (sg_is_last(ctx->in_sg) && sg_is_last(ctx->out_sg)) {
+ in = IS_ALIGNED((u32)ctx->in_sg->offset, sizeof(u32));
+ out = IS_ALIGNED((u32)ctx->out_sg->offset, sizeof(u32));
+
+ fast = in && out;
+ }
+
+ if (fast) {
+ count = min(ctx->total, sg_dma_len(ctx->in_sg));
+ count = min(count, sg_dma_len(ctx->out_sg));
+
+ if (count != ctx->total)
+ return -EINVAL;
+
+ err = dma_map_sg(NULL, ctx->in_sg, 1, DMA_TO_DEVICE);
+ if (!err)
+ return -EINVAL;
+
+ err = dma_map_sg(NULL, ctx->out_sg, 1, DMA_FROM_DEVICE);
+ if (!err) {
+ dma_unmap_sg(NULL, ctx->in_sg, 1, DMA_TO_DEVICE);
+ return -EINVAL;
+ }
+
+ addr_in = sg_dma_address(ctx->in_sg);
+ addr_out = sg_dma_address(ctx->out_sg);
+
+ ctx->flags |= FLAGS_FAST;
+ } else {
+ count = sg_copy(&ctx->in_sg, &ctx->in_offset, ctx->buf_in,
+ ctx->buflen, ctx->total, 0);
+
+ addr_in = ctx->dma_addr_in;
+ addr_out = ctx->dma_addr_out;
+
+ ctx->flags &= ~FLAGS_FAST;
+ }
+
+ ctx->total -= count;
+
+ PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_AES1, LOCK_HWA);
+
+ /* Configure HWA */
+ SCXPublicCryptoEnableClock(PUBLIC_CRYPTO_AES1_CLOCK_REG);
+
+ PDrvCryptoRestoreAESRegisters(state);
+
+ OUTREG32(&pAESReg_t->AES_SYSCONFIG, INREG32(&pAESReg_t->AES_SYSCONFIG)
+ | AES_SYSCONFIG_DMA_REQ_OUT_EN_BIT
+ | AES_SYSCONFIG_DMA_REQ_IN_EN_BIT);
+
+ ctx->dma_size = count;
+ if (!fast)
+ dma_sync_single_for_device(NULL, addr_in, count,
+ DMA_TO_DEVICE);
+
+ dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
+ dma_params.frame_count = count / AES_BLOCK_SIZE;
+ dma_params.elem_count = DMA_CEN_Elts_per_Frame_AES;
+ dma_params.src_ei = 0;
+ dma_params.src_fi = 0;
+ dma_params.dst_ei = 0;
+ dma_params.dst_fi = 0;
+ dma_params.sync_mode = OMAP_DMA_SYNC_FRAME;
+
+ /* IN */
+ dma_params.trigger = ctx->dma_in;
+ dma_params.src_or_dst_synch = OMAP_DMA_DST_SYNC;
+ dma_params.dst_start = AES1_REGS_HW_ADDR + 0x60;
+ dma_params.dst_amode = OMAP_DMA_AMODE_CONSTANT;
+ dma_params.src_start = addr_in;
+ dma_params.src_amode = OMAP_DMA_AMODE_POST_INC;
+
+ omap_set_dma_params(ctx->dma_lch_in, &dma_params);
+
+ omap_set_dma_dest_burst_mode(ctx->dma_lch_in, OMAP_DMA_DATA_BURST_16);
+ omap_set_dma_src_burst_mode(ctx->dma_lch_in, OMAP_DMA_DATA_BURST_16);
+
+ /* OUT */
+ dma_params.trigger = ctx->dma_out;
+ dma_params.src_or_dst_synch = OMAP_DMA_SRC_SYNC;
+ dma_params.src_start = AES1_REGS_HW_ADDR + 0x60;
+ dma_params.src_amode = OMAP_DMA_AMODE_CONSTANT;
+ dma_params.dst_start = addr_out;
+ dma_params.dst_amode = OMAP_DMA_AMODE_POST_INC;
+
+ omap_set_dma_params(ctx->dma_lch_out, &dma_params);
+
+ omap_set_dma_dest_burst_mode(ctx->dma_lch_out, OMAP_DMA_DATA_BURST_16);
+ omap_set_dma_src_burst_mode(ctx->dma_lch_out, OMAP_DMA_DATA_BURST_16);
+
+ /* Is this really needed? */
+ omap_disable_dma_irq(ctx->dma_lch_in, OMAP_DMA_DROP_IRQ);
+ omap_enable_dma_irq(ctx->dma_lch_in, OMAP_DMA_BLOCK_IRQ);
+ omap_disable_dma_irq(ctx->dma_lch_out, OMAP_DMA_DROP_IRQ);
+ omap_enable_dma_irq(ctx->dma_lch_out, OMAP_DMA_BLOCK_IRQ);
+
+ wmb();
+
+ omap_start_dma(ctx->dma_lch_in);
+ omap_start_dma(ctx->dma_lch_out);
+
+ return 0;
+}
+
+static int aes_dma_stop(struct aes_hwa_ctx *ctx)
+{
+ struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state =
+ crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(ctx->req));
+ int err = 0;
+ size_t count;
+
+ dprintk(KERN_INFO "aes_dma_stop(%p)\n", ctx);
+
+ PDrvCryptoSaveAESRegisters(state);
+
+ if (!AES_CTRL_IS_MODE_ECB(state->CTRL)) {
+ u32 *ptr = (u32 *) ctx->req->info;
+
+ ptr[0] = state->AES_IV_0;
+ ptr[1] = state->AES_IV_1;
+ ptr[2] = state->AES_IV_2;
+ ptr[3] = state->AES_IV_3;
+ }
+
+ OUTREG32(&pAESReg_t->AES_SYSCONFIG, 0);
+
+ SCXPublicCryptoDisableClock(PUBLIC_CRYPTO_AES1_CLOCK_REG);
+
+ PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_AES1, UNLOCK_HWA);
+
+ omap_stop_dma(ctx->dma_lch_in);
+ omap_stop_dma(ctx->dma_lch_out);
+
+ if (ctx->flags & FLAGS_FAST) {
+ dma_unmap_sg(NULL, ctx->out_sg, 1, DMA_FROM_DEVICE);
+ dma_unmap_sg(NULL, ctx->in_sg, 1, DMA_TO_DEVICE);
+ } else {
+ dma_sync_single_for_device(NULL, ctx->dma_addr_out,
+ ctx->dma_size, DMA_FROM_DEVICE);
+
+ /* Copy data */
+ count = sg_copy(&ctx->out_sg, &ctx->out_offset, ctx->buf_out,
+ ctx->buflen, ctx->dma_size, 1);
+ if (count != ctx->dma_size)
+ err = -EINVAL;
+ }
+
+ if (err || !ctx->total)
+ ctx->req->base.complete(&ctx->req->base, err);
+
+ return err;
+}
+
+static void aes_dma_callback(int lch, u16 ch_status, void *data)
+{
+ struct aes_hwa_ctx *ctx = data;
+
+ if (lch == ctx->dma_lch_out)
+ tasklet_schedule(&ctx->task);
+}
+
+static int aes_dma_init(struct aes_hwa_ctx *ctx)
+{
+ int err = -ENOMEM;
+
+ ctx->dma_lch_out = -1;
+ ctx->dma_lch_in = -1;
+
+ ctx->buflen = PAGE_SIZE;
+ ctx->buflen &= ~(AES_BLOCK_SIZE - 1);
+
+ dprintk(KERN_INFO "aes_dma_init(%p)\n", ctx);
+
+ /* Allocate and map cache buffers */
+ ctx->buf_in = dma_alloc_coherent(NULL, ctx->buflen, &ctx->dma_addr_in,
+ GFP_KERNEL);
+ if (!ctx->buf_in) {
+ dprintk(KERN_ERR "SMC: Unable to alloc AES in cache buffer\n");
+ return -ENOMEM;
+ }
+
+ ctx->buf_out = dma_alloc_coherent(NULL, ctx->buflen, &ctx->dma_addr_out,
+ GFP_KERNEL);
+ if (!ctx->buf_out) {
+ dprintk(KERN_ERR "SMC: Unable to alloc AES out cache buffer\n");
+ dma_free_coherent(NULL, ctx->buflen, ctx->buf_in,
+ ctx->dma_addr_in);
+ return -ENOMEM;
+ }
+
+ /* Request DMA channels */
+ err = omap_request_dma(0, "smc-aes-rx", aes_dma_callback, ctx,
+ &ctx->dma_lch_in);
+ if (err) {
+ dprintk(KERN_ERR "SMC: Unable to request AES RX DMA channel\n");
+ goto err_dma_in;
+ }
+
+	err = omap_request_dma(0, "smc-aes-tx", aes_dma_callback,
+ ctx, &ctx->dma_lch_out);
+ if (err) {
+ dprintk(KERN_ERR "SMC: Unable to request AES TX DMA channel\n");
+ goto err_dma_out;
+ }
+
+	dprintk(KERN_INFO "aes_dma_init(%p) configured DMA channels "
+ "(RX = %d, TX = %d)\n", ctx, ctx->dma_lch_in, ctx->dma_lch_out);
+
+ return 0;
+
+err_dma_out:
+ omap_free_dma(ctx->dma_lch_in);
+err_dma_in:
+ dma_free_coherent(NULL, ctx->buflen, ctx->buf_in, ctx->dma_addr_in);
+ dma_free_coherent(NULL, ctx->buflen, ctx->buf_out, ctx->dma_addr_out);
+
+ return err;
+}
+
+static void aes_dma_cleanup(struct aes_hwa_ctx *ctx)
+{
+ omap_free_dma(ctx->dma_lch_out);
+ omap_free_dma(ctx->dma_lch_in);
+ dma_free_coherent(NULL, ctx->buflen, ctx->buf_in, ctx->dma_addr_in);
+ dma_free_coherent(NULL, ctx->buflen, ctx->buf_out, ctx->dma_addr_out);
+}
+
+static int aes_handle_req(struct aes_hwa_ctx *ctx)
+{
+ struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state;
+ struct crypto_async_request *async_req, *backlog;
+ struct ablkcipher_request *req;
+ unsigned long flags;
+
+ if (ctx->total)
+ goto start;
+
+ spin_lock_irqsave(&ctx->lock, flags);
+ backlog = crypto_get_backlog(&ctx->queue);
+ async_req = crypto_dequeue_request(&ctx->queue);
+ if (!async_req)
+ clear_bit(FLAGS_BUSY, &ctx->flags);
+ spin_unlock_irqrestore(&ctx->lock, flags);
+
+ if (!async_req)
+ return 0;
+
+ if (backlog)
+ backlog->complete(backlog, -EINPROGRESS);
+
+ req = ablkcipher_request_cast(async_req);
+
+ ctx->req = req;
+ ctx->total = req->nbytes;
+ ctx->in_offset = 0;
+ ctx->in_sg = req->src;
+ ctx->out_offset = 0;
+ ctx->out_sg = req->dst;
+
+ state = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+
+ if (!AES_CTRL_IS_MODE_ECB(state->CTRL)) {
+ u32 *ptr = (u32 *) req->info;
+
+ state->AES_IV_0 = ptr[0];
+ state->AES_IV_1 = ptr[1];
+ state->AES_IV_2 = ptr[2];
+ state->AES_IV_3 = ptr[3];
+ }
+
+start:
+ return aes_dma_start(ctx);
+}
+
+static void aes_tasklet(unsigned long data)
+{
+ struct aes_hwa_ctx *ctx = (struct aes_hwa_ctx *) data;
+
+ aes_dma_stop(ctx);
+ aes_handle_req(ctx);
+}
+
+/* Generic */
+static int aes_setkey(struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state,
+ const u8 *key, unsigned int keylen)
+{
+ u32 *ptr = (u32 *)key;
+
+ switch (keylen) {
+ case 16:
+ state->CTRL |= AES_CTRL_KEY_SIZE_128;
+ break;
+ case 24:
+ state->CTRL |= AES_CTRL_KEY_SIZE_192;
+ break;
+ case 32:
+ state->CTRL |= AES_CTRL_KEY_SIZE_256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ state->KEY1_0 = ptr[0];
+ state->KEY1_1 = ptr[1];
+ state->KEY1_2 = ptr[2];
+ state->KEY1_3 = ptr[3];
+ if (keylen >= 24) {
+ state->KEY1_4 = ptr[4];
+ state->KEY1_5 = ptr[5];
+ }
+ if (keylen == 32) {
+ state->KEY1_6 = ptr[6];
+ state->KEY1_7 = ptr[7];
+ }
+
+ state->key_is_public = 1;
+
+ return 0;
+}
+
+static int aes_operate(struct ablkcipher_request *req)
+{
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&aes_ctx->lock, flags);
+ err = ablkcipher_enqueue_request(&aes_ctx->queue, req);
+ spin_unlock_irqrestore(&aes_ctx->lock, flags);
+
+ if (!test_and_set_bit(FLAGS_BUSY, &aes_ctx->flags))
+ aes_handle_req(aes_ctx);
+
+ return err;
+}
+
+static int aes_encrypt(struct ablkcipher_request *req)
+{
+ struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state =
+ crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+
+ state->CTRL |= AES_CTRL_DIRECTION_ENCRYPT;
+
+ return aes_operate(req);
+}
+
+static int aes_decrypt(struct ablkcipher_request *req)
+{
+ struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state =
+ crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
+
+ state->CTRL &= ~(AES_CTRL_DIRECTION_ENCRYPT);
+ state->CTRL |= AES_CTRL_DIRECTION_DECRYPT;
+
+ return aes_operate(req);
+}
+
+static int aes_single_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state = crypto_tfm_ctx(tfm);
+
+ state->CTRL = AES_CTRL_MODE_ECB_BIT;
+
+ return aes_setkey(state, key, keylen);
+}
+
+static void aes_single_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state = crypto_tfm_ctx(tfm);
+
+ state->CTRL |= AES_CTRL_DIRECTION_ENCRYPT;
+
+ PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_AES1, LOCK_HWA);
+
+ SCXPublicCryptoEnableClock(PUBLIC_CRYPTO_AES1_CLOCK_REG);
+ PDrvCryptoUpdateAES(state, (u8 *) in, out, 1);
+ SCXPublicCryptoDisableClock(PUBLIC_CRYPTO_AES1_CLOCK_REG);
+
+ PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_AES1, UNLOCK_HWA);
+}
+
+static void aes_single_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state =
+ crypto_tfm_ctx(tfm);
+
+ state->CTRL &= ~(AES_CTRL_DIRECTION_ENCRYPT);
+ state->CTRL |= AES_CTRL_DIRECTION_DECRYPT;
+
+ PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_AES1, LOCK_HWA);
+
+ SCXPublicCryptoEnableClock(PUBLIC_CRYPTO_AES1_CLOCK_REG);
+ PDrvCryptoUpdateAES(state, (u8 *) in, out, 1);
+ SCXPublicCryptoDisableClock(PUBLIC_CRYPTO_AES1_CLOCK_REG);
+
+ PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_AES1, UNLOCK_HWA);
+}
+
+/* AES ECB */
+static int aes_ecb_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state =
+ crypto_ablkcipher_ctx(tfm);
+
+ state->CTRL = AES_CTRL_MODE_ECB_BIT;
+
+ return aes_setkey(state, key, keylen);
+}
+
+/* AES CBC */
+static int aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state =
+ crypto_ablkcipher_ctx(tfm);
+
+ state->CTRL = AES_CTRL_MODE_CBC_BIT;
+
+ return aes_setkey(state, key, keylen);
+}
+
+/* AES CTR */
+static int aes_ctr_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct PUBLIC_CRYPTO_AES_OPERATION_STATE *state =
+ crypto_ablkcipher_ctx(tfm);
+
+ /* Always defaults to 128-bit counter */
+ state->CTRL = AES_CTRL_MODE_CTR_BIT | AES_CTRL_CTR_WIDTH_128;
+
+ return aes_setkey(state, key, keylen);
+}
+
+static struct crypto_alg smc_aes_alg = {
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_priority = 999,
+ .cra_name = "aes",
+ .cra_driver_name = "aes-smc",
+ .cra_module = THIS_MODULE,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize =
+ sizeof(struct PUBLIC_CRYPTO_AES_OPERATION_STATE),
+ .cra_alignmask = 3,
+ .cra_list = LIST_HEAD_INIT(smc_aes_alg.cra_list),
+ .cra_u = {
+ .cipher = {
+ .cia_min_keysize = AES_MIN_KEY_SIZE,
+ .cia_max_keysize = AES_MAX_KEY_SIZE,
+ .cia_setkey = aes_single_setkey,
+ .cia_encrypt = aes_single_encrypt,
+ .cia_decrypt = aes_single_decrypt,
+ }
+ },
+};
+
+static struct crypto_alg smc_aes_ecb_alg = {
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_priority = 999,
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "aes-ecb-smc",
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize =
+ sizeof(struct PUBLIC_CRYPTO_AES_OPERATION_STATE),
+ .cra_alignmask = 3,
+ .cra_list = LIST_HEAD_INIT(smc_aes_ecb_alg.cra_list),
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_ecb_setkey,
+ .encrypt = aes_encrypt,
+ .decrypt = aes_decrypt,
+ }
+ },
+};
+
+static struct crypto_alg smc_aes_cbc_alg = {
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_priority = 999,
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "aes-cbc-smc",
+ .cra_module = THIS_MODULE,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize =
+ sizeof(struct PUBLIC_CRYPTO_AES_OPERATION_STATE),
+ .cra_alignmask = 3,
+ .cra_list = LIST_HEAD_INIT(smc_aes_cbc_alg.cra_list),
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = PUBLIC_CRYPTO_IV_MAX_SIZE,
+ .setkey = aes_cbc_setkey,
+ .encrypt = aes_encrypt,
+ .decrypt = aes_decrypt,
+ }
+ },
+};
+
+static struct crypto_alg smc_aes_ctr_alg = {
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_priority = 999,
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "aes-ctr-smc",
+ .cra_module = THIS_MODULE,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize =
+ sizeof(struct PUBLIC_CRYPTO_AES_OPERATION_STATE),
+ .cra_alignmask = 3,
+ .cra_list = LIST_HEAD_INIT(smc_aes_ctr_alg.cra_list),
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = PUBLIC_CRYPTO_IV_MAX_SIZE,
+ .setkey = aes_ctr_setkey,
+ .encrypt = aes_encrypt,
+ .decrypt = aes_decrypt,
+ }
+ },
+};
+
+
+int register_smc_public_crypto_aes(void)
+{
+ int ret;
+
+ aes_ctx = kzalloc(sizeof(struct aes_hwa_ctx), GFP_KERNEL);
+ if (aes_ctx == NULL)
+ return -ENOMEM;
+
+ crypto_init_queue(&aes_ctx->queue, 1);
+ tasklet_init(&aes_ctx->task, aes_tasklet, (unsigned long)aes_ctx);
+ spin_lock_init(&aes_ctx->lock);
+
+ aes_ctx->dma_in = OMAP44XX_DMA_AES1_P_DATA_IN_REQ;
+ aes_ctx->dma_out = OMAP44XX_DMA_AES1_P_DATA_OUT_REQ;
+
+ ret = aes_dma_init(aes_ctx);
+ if (ret)
+ goto err_dma;
+
+ ret = crypto_register_alg(&smc_aes_alg);
+ if (ret)
+ goto err_dma;
+
+ ret = crypto_register_alg(&smc_aes_ecb_alg);
+ if (ret)
+ goto err_ecb;
+
+ ret = crypto_register_alg(&smc_aes_cbc_alg);
+ if (ret)
+ goto err_cbc;
+
+ ret = crypto_register_alg(&smc_aes_ctr_alg);
+ if (ret)
+ goto err_ctr;
+
+ return 0;
+
+err_ctr:
+ crypto_unregister_alg(&smc_aes_cbc_alg);
+err_cbc:
+ crypto_unregister_alg(&smc_aes_ecb_alg);
+err_ecb:
+ crypto_unregister_alg(&smc_aes_alg);
+err_dma:
+ tasklet_kill(&aes_ctx->task);
+ kfree(aes_ctx);
+ return ret;
+}
+
+void unregister_smc_public_crypto_aes(void)
+{
+ if (aes_ctx == NULL)
+ return;
+
+ crypto_unregister_alg(&smc_aes_alg);
+ crypto_unregister_alg(&smc_aes_ecb_alg);
+ crypto_unregister_alg(&smc_aes_cbc_alg);
+ crypto_unregister_alg(&smc_aes_ctr_alg);
+
+ tasklet_kill(&aes_ctx->task);
+
+ aes_dma_cleanup(aes_ctx);
+
+ kfree(aes_ctx);
+}
+#endif
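
For reference, below is a minimal, illustrative sketch — not from this patch — of how a kernel client could reach the ablkcipher algorithms registered above through the standard crypto API of this kernel generation. The function, key, IV and buffer names are placeholders, and it assumes "cbc(aes)" resolves to aes-cbc-smc by virtue of its cra_priority of 999.

#include <linux/crypto.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>

struct smc_aes_result {
	struct completion completion;
	int err;
};

static void smc_aes_example_done(struct crypto_async_request *req, int err)
{
	struct smc_aes_result *res = req->data;

	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->completion);
}

/* Encrypts buf in place; len must be a multiple of AES_BLOCK_SIZE. */
static int smc_aes_cbc_example(u8 *buf, unsigned int len,
	const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	struct smc_aes_result result;
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (req == NULL) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	init_completion(&result.completion);
	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
		smc_aes_example_done, &result);

	sg_init_one(&sg, buf, len);
	ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);

	ret = crypto_ablkcipher_encrypt(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		/* Completion fires when aes_dma_stop() above calls
		 * ctx->req->base.complete() on the finished request. */
		wait_for_completion(&result.completion);
		ret = result.err;
	}

	ablkcipher_request_free(req);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}
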
diff --git a/security/smc/omap4/scx_public_crypto_Digest.c b/security/smc/omap4/scx_public_crypto_Digest.c
new file mode 100644
index 0000000..7a40089
--- /dev/null
+++ b/security/smc/omap4/scx_public_crypto_Digest.c
@@ -0,0 +1,964 @@
+/*
+ * Copyright (c) 2006-2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include "scxlnx_defs.h"
+#include "scxlnx_util.h"
+#include "scx_public_crypto.h"
+#include "scx_public_dma.h"
+#include "scxlnx_mshield.h"
+
+#include <linux/io.h>
+#include <mach/io.h>
+#include <linux/crypto.h>
+#include <crypto/internal/hash.h>
+
+/*
+ * SHA2/MD5 Hardware Accelerator: Base address for SHA2/MD5 HIB2
+ * This is referenced as the SHA2MD5 module in the Crypto TRM
+ */
+#define DIGEST1_REGS_HW_ADDR 0x4B101000
+
+/*
+ * IRQSTATUS register Masks
+ */
+#define DIGEST_IRQSTATUS_OUTPUT_READY_BIT (1 << 0)
+#define DIGEST_IRQSTATUS_INPUT_READY_BIT (1 << 1)
+#define DIGEST_IRQSTATUS_PARTHASH_READY_BIT (1 << 2)
+#define DIGEST_IRQSTATUS_CONTEXT_READY_BIT (1 << 3)
+
+/*
+ * MODE register Masks
+ */
+#define DIGEST_MODE_GET_ALGO(x) ((x & 0x6) >> 1)
+#define DIGEST_MODE_SET_ALGO(x, a) ((a << 1) | (x & 0xFFFFFFF9))
+
+#define DIGEST_MODE_ALGO_CONST_BIT (1 << 3)
+#define DIGEST_MODE_CLOSE_HASH_BIT (1 << 4)
+
+/*
+ * SYSCONFIG register masks
+ */
+#define DIGEST_SYSCONFIG_PIT_EN_BIT (1 << 2)
+#define DIGEST_SYSCONFIG_PDMA_EN_BIT (1 << 3)
+#define DIGEST_SYSCONFIG_PCONT_SWT_BIT (1 << 6)
+#define DIGEST_SYSCONFIG_PADVANCED_BIT (1 << 7)
+
+/*-------------------------------------------------------------------------*/
+/* Digest Context */
+/*-------------------------------------------------------------------------*/
+/**
+ * This structure contains the registers of the SHA1/MD5 HW accelerator.
+ */
+struct Sha1Md5Reg_t {
+ u32 ODIGEST_A; /* 0x00 Outer Digest A */
+ u32 ODIGEST_B; /* 0x04 Outer Digest B */
+ u32 ODIGEST_C; /* 0x08 Outer Digest C */
+ u32 ODIGEST_D; /* 0x0C Outer Digest D */
+ u32 ODIGEST_E; /* 0x10 Outer Digest E */
+ u32 ODIGEST_F; /* 0x14 Outer Digest F */
+ u32 ODIGEST_G; /* 0x18 Outer Digest G */
+ u32 ODIGEST_H; /* 0x1C Outer Digest H */
+ u32 IDIGEST_A; /* 0x20 Inner Digest A */
+ u32 IDIGEST_B; /* 0x24 Inner Digest B */
+ u32 IDIGEST_C; /* 0x28 Inner Digest C */
+ u32 IDIGEST_D; /* 0x2C Inner Digest D */
+ u32 IDIGEST_E; /* 0x30 Inner Digest E */
+ u32 IDIGEST_F; /* 0x34 Inner Digest F */
+ u32 IDIGEST_G; /* 0x38 Inner Digest G */
+ u32 IDIGEST_H; /* 0x3C Inner Digest H */
+ u32 DIGEST_COUNT; /* 0x40 Digest count */
+ u32 MODE; /* 0x44 Digest mode */
+ u32 LENGTH; /* 0x48 Data length */
+
+ u32 reserved0[13];
+
+ u32 DIN_0; /* 0x80 Data 0 */
+ u32 DIN_1; /* 0x84 Data 1 */
+ u32 DIN_2; /* 0x88 Data 2 */
+ u32 DIN_3; /* 0x8C Data 3 */
+ u32 DIN_4; /* 0x90 Data 4 */
+ u32 DIN_5; /* 0x94 Data 5 */
+ u32 DIN_6; /* 0x98 Data 6 */
+ u32 DIN_7; /* 0x9C Data 7 */
+ u32 DIN_8; /* 0xA0 Data 8 */
+ u32 DIN_9; /* 0xA4 Data 9 */
+ u32 DIN_10; /* 0xA8 Data 10 */
+ u32 DIN_11; /* 0xAC Data 11 */
+ u32 DIN_12; /* 0xB0 Data 12 */
+ u32 DIN_13; /* 0xB4 Data 13 */
+ u32 DIN_14; /* 0xB8 Data 14 */
+ u32 DIN_15; /* 0xBC Data 15 */
+
+ u32 reserved1[16];
+
+ u32 REVISION; /* 0x100 Revision */
+
+ u32 reserved2[3];
+
+ u32 SYSCONFIG; /* 0x110 Config */
+ u32 SYSSTATUS; /* 0x114 Status */
+ u32 IRQSTATUS; /* 0x118 IRQ Status */
+ u32 IRQENABLE; /* 0x11C IRQ Enable */
+};
+
+static struct Sha1Md5Reg_t *pSha1Md5Reg_t;
+
+static const u8 md5OverEmptyString[] = {
+ 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
+ 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e
+};
+
+static const u8 sha1OverEmptyString[] = {
+ 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
+ 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
+ 0xaf, 0xd8, 0x07, 0x09
+};
+
+static const u8 sha224OverEmptyString[] = {
+ 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9,
+ 0x47, 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4,
+ 0x15, 0xa2, 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a,
+ 0xc5, 0xb3, 0xe4, 0x2f
+};
+
+static const u8 sha256OverEmptyString[] = {
+ 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
+ 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
+ 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
+ 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
+};
+
+/*------------------------------------------------------------------------
+ *Forward declarations
+ *------------------------------------------------------------------------- */
+
+static void static_Hash_HwPerform64bDigest(u32 *pData,
+ u32 nAlgo, u32 nBytesProcessed);
+static void static_Hash_HwPerformDmaDigest(u8 *pData, u32 nDataLength,
+ u32 nAlgo, u32 nBytesProcessed);
+
+static void PDrvCryptoUpdateHashWithDMA(
+ struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *pSHAState,
+ u8 *pData, u32 dataLength);
+
+
+/*-------------------------------------------------------------------------
+ *Save HWA registers into the specified operation state structure
+ *------------------------------------------------------------------------*/
+static void PDrvCryptoSaveHashRegisters(
+ struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *pSHAState)
+{
+ dprintk(KERN_INFO "PDrvCryptoSaveHashRegisters: State=%p\n",
+ pSHAState);
+
+ pSHAState->SHA_DIGEST_A = INREG32(&pSha1Md5Reg_t->IDIGEST_A);
+ pSHAState->SHA_DIGEST_B = INREG32(&pSha1Md5Reg_t->IDIGEST_B);
+ pSHAState->SHA_DIGEST_C = INREG32(&pSha1Md5Reg_t->IDIGEST_C);
+ pSHAState->SHA_DIGEST_D = INREG32(&pSha1Md5Reg_t->IDIGEST_D);
+ pSHAState->SHA_DIGEST_E = INREG32(&pSha1Md5Reg_t->IDIGEST_E);
+ pSHAState->SHA_DIGEST_F = INREG32(&pSha1Md5Reg_t->IDIGEST_F);
+ pSHAState->SHA_DIGEST_G = INREG32(&pSha1Md5Reg_t->IDIGEST_G);
+ pSHAState->SHA_DIGEST_H = INREG32(&pSha1Md5Reg_t->IDIGEST_H);
+}
+
+/*-------------------------------------------------------------------------
+ *Restore the HWA registers from the operation state structure
+ *-------------------------------------------------------------------------*/
+static void PDrvCryptoRestoreHashRegisters(
+ struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *pSHAState)
+{
+ dprintk(KERN_INFO "PDrvCryptoRestoreHashRegisters: State=%p\n",
+ pSHAState);
+
+ if (pSHAState->nBytesProcessed != 0) {
+ /*
+ * Some bytes were already processed. Initialize
+ * previous digest
+ */
+ OUTREG32(&pSha1Md5Reg_t->IDIGEST_A, pSHAState->SHA_DIGEST_A);
+ OUTREG32(&pSha1Md5Reg_t->IDIGEST_B, pSHAState->SHA_DIGEST_B);
+ OUTREG32(&pSha1Md5Reg_t->IDIGEST_C, pSHAState->SHA_DIGEST_C);
+ OUTREG32(&pSha1Md5Reg_t->IDIGEST_D, pSHAState->SHA_DIGEST_D);
+ OUTREG32(&pSha1Md5Reg_t->IDIGEST_E, pSHAState->SHA_DIGEST_E);
+ OUTREG32(&pSha1Md5Reg_t->IDIGEST_F, pSHAState->SHA_DIGEST_F);
+ OUTREG32(&pSha1Md5Reg_t->IDIGEST_G, pSHAState->SHA_DIGEST_G);
+ OUTREG32(&pSha1Md5Reg_t->IDIGEST_H, pSHAState->SHA_DIGEST_H);
+ }
+
+ OUTREG32(&pSha1Md5Reg_t->SYSCONFIG, 0);
+}
+
+/*------------------------------------------------------------------------- */
+
+void PDrvCryptoDigestInit(void)
+{
+ pSha1Md5Reg_t = omap_ioremap(DIGEST1_REGS_HW_ADDR, SZ_1M, MT_DEVICE);
+ if (pSha1Md5Reg_t == NULL)
+ panic("Unable to remap SHA2/MD5 module");
+}
+
+void PDrvCryptoDigestExit(void)
+{
+ omap_iounmap(pSha1Md5Reg_t);
+}
+
+void PDrvCryptoUpdateHash(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *pSHAState,
+ u8 *pData, u32 dataLength)
+{
+ u32 dmaUse = PUBLIC_CRYPTO_DMA_USE_NONE;
+
+ /*
+ *Choice of the processing type
+ */
+ if (dataLength >= DMA_TRIGGER_IRQ_DIGEST)
+ dmaUse = PUBLIC_CRYPTO_DMA_USE_IRQ;
+
+ dprintk(KERN_INFO "PDrvCryptoUpdateHash : "\
+ "Data=0x%08x/%u, Chunk=%u, Processed=%u, dmaUse=0x%08x\n",
+ (u32)pData, (u32)dataLength,
+ pSHAState->nChunkLength, pSHAState->nBytesProcessed,
+ dmaUse);
+
+ if (dataLength == 0) {
+ dprintk(KERN_INFO "PDrvCryptoUpdateHash: "\
+ "Nothing to process\n");
+ return;
+ }
+
+ if (dmaUse != PUBLIC_CRYPTO_DMA_USE_NONE) {
+ /*
+ * Restore the registers of the accelerator from the operation
+ * state
+ */
+ PDrvCryptoRestoreHashRegisters(pSHAState);
+
+ /*perform the updates with DMA */
+ PDrvCryptoUpdateHashWithDMA(pSHAState, pData, dataLength);
+
+ /* Save the accelerator registers into the operation state */
+ PDrvCryptoSaveHashRegisters(pSHAState);
+ } else {
+ /*Non-DMA transfer */
+
+		/*(1)We take the chunk buffer which contains the last saved
+		 *data that could not be processed yet because there was not
+		 *enough of it to make a 64B buffer. Then we try to make a
+		 *64B buffer by concatenating it with the newly passed data
+		 */
+
+		/*Is there any data in the chunk? If yes, is it possible to
+		 *make a 64B buffer with the new data passed? */
+ if ((pSHAState->nChunkLength != 0)
+ && (pSHAState->nChunkLength + dataLength >=
+ HASH_BLOCK_BYTES_LENGTH)) {
+
+ u8 vLengthToComplete =
+ HASH_BLOCK_BYTES_LENGTH - pSHAState->nChunkLength;
+
+ /*So we fill the chunk buffer with the new data to
+ *complete to 64B */
+ memcpy(pSHAState->pChunkBuffer + pSHAState->
+ nChunkLength, pData, vLengthToComplete);
+
+ if (pSHAState->nChunkLength + dataLength ==
+ HASH_BLOCK_BYTES_LENGTH) {
+ /*We'll keep some data for the final */
+ pSHAState->nChunkLength =
+ HASH_BLOCK_BYTES_LENGTH;
+ dprintk(KERN_INFO "PDrvCryptoUpdateHash: "\
+ "Done: Chunk=%u; Processed=%u\n",
+ pSHAState->nChunkLength,
+ pSHAState->nBytesProcessed);
+ return;
+ }
+
+ /*
+ * Restore the registers of the accelerator from the
+ * operation state
+ */
+ PDrvCryptoRestoreHashRegisters(pSHAState);
+
+ /*Then we send this buffer to the HWA */
+ static_Hash_HwPerform64bDigest(
+ (u32 *)pSHAState->pChunkBuffer, pSHAState->CTRL,
+ pSHAState->nBytesProcessed);
+
+ /*
+ * Save the accelerator registers into the operation
+ * state
+ */
+ PDrvCryptoSaveHashRegisters(pSHAState);
+
+ pSHAState->nBytesProcessed =
+ INREG32(&pSha1Md5Reg_t->DIGEST_COUNT);
+
+ /*We have flushed the chunk so it is empty now */
+ pSHAState->nChunkLength = 0;
+
+ /*Then we have less data to process */
+ pData += vLengthToComplete;
+ dataLength -= vLengthToComplete;
+ }
+
+		/*(2)We process all the 64B buffers that we can */
+ if (pSHAState->nChunkLength + dataLength >=
+ HASH_BLOCK_BYTES_LENGTH) {
+
+ while (dataLength > HASH_BLOCK_BYTES_LENGTH) {
+ u8 pTempAlignedBuffer[HASH_BLOCK_BYTES_LENGTH];
+
+ /*
+ *We process a 64B buffer
+ */
+ /*We copy the data to process to an aligned
+ *buffer */
+ memcpy(pTempAlignedBuffer, pData,
+ HASH_BLOCK_BYTES_LENGTH);
+
+ /*Then we send this buffer to the hash
+ *hardware */
+ PDrvCryptoRestoreHashRegisters(pSHAState);
+ static_Hash_HwPerform64bDigest(
+ (u32 *) pTempAlignedBuffer,
+ pSHAState->CTRL,
+ pSHAState->nBytesProcessed);
+ PDrvCryptoSaveHashRegisters(pSHAState);
+
+ pSHAState->nBytesProcessed =
+ INREG32(&pSha1Md5Reg_t->DIGEST_COUNT);
+
+				/*Then we decrease the remaining data by 64B */
+ pData += HASH_BLOCK_BYTES_LENGTH;
+ dataLength -= HASH_BLOCK_BYTES_LENGTH;
+ }
+ }
+
+		/*(3)We check whether some data remains that could not be
+		 *processed yet because it is not enough to fill a 64B buffer */
+ if (dataLength > 0) {
+ if (pSHAState->nChunkLength + dataLength >
+ HASH_BLOCK_BYTES_LENGTH) {
+				/*Should never happen */
+				panic("PDrvCryptoUpdateHash: nChunkLength + "
+					"dataLength > HASH_BLOCK_BYTES_LENGTH\n");
+ }
+
+			/*Store the remaining data in the chunk buffer until
+			 *more data arrives or the final operation runs */
+ memcpy(pSHAState->pChunkBuffer + pSHAState->
+ nChunkLength, pData, dataLength);
+ pSHAState->nChunkLength += dataLength;
+ }
+ }
+
+ dprintk(KERN_INFO "PDrvCryptoUpdateHash: Done: "\
+ "Chunk=%u; Processed=%u\n",
+ pSHAState->nChunkLength, pSHAState->nBytesProcessed);
+}
+
+/*------------------------------------------------------------------------- */
+
+static void static_Hash_HwPerform64bDigest(u32 *pData,
+ u32 nAlgo, u32 nBytesProcessed)
+{
+ u32 nAlgoConstant = 0;
+
+ OUTREG32(&pSha1Md5Reg_t->DIGEST_COUNT, nBytesProcessed);
+
+ if (nBytesProcessed == 0) {
+ /* No bytes processed so far. Will use the algo constant instead
+ of previous digest */
+ nAlgoConstant = 1 << 3;
+ }
+
+ OUTREG32(&pSha1Md5Reg_t->MODE,
+ nAlgoConstant | (nAlgo & 0x6));
+ OUTREG32(&pSha1Md5Reg_t->LENGTH, HASH_BLOCK_BYTES_LENGTH);
+
+ if (SCXPublicCryptoWaitForReadyBit(
+ (u32 *)&pSha1Md5Reg_t->IRQSTATUS,
+ DIGEST_IRQSTATUS_INPUT_READY_BIT)
+ != PUBLIC_CRYPTO_OPERATION_SUCCESS) {
+ /* Crash the system as this should never occur */
+		panic("Waited too long for the DIGEST HW accelerator "
+			"input data to be ready\n");
+ }
+
+ /*
+ *The pData buffer is a buffer of 64 bytes.
+ */
+ OUTREG32(&pSha1Md5Reg_t->DIN_0, pData[0]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_1, pData[1]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_2, pData[2]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_3, pData[3]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_4, pData[4]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_5, pData[5]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_6, pData[6]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_7, pData[7]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_8, pData[8]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_9, pData[9]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_10, pData[10]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_11, pData[11]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_12, pData[12]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_13, pData[13]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_14, pData[14]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_15, pData[15]);
+
+ /*
+ *Wait until the hash operation is finished.
+ */
+ SCXPublicCryptoWaitForReadyBitInfinitely(
+ (u32 *)&pSha1Md5Reg_t->IRQSTATUS,
+ DIGEST_IRQSTATUS_OUTPUT_READY_BIT);
+}
+
+/*------------------------------------------------------------------------- */
+
+static void static_Hash_HwPerformDmaDigest(u8 *pData, u32 nDataLength,
+ u32 nAlgo, u32 nBytesProcessed)
+{
+ /*
+	 *Note: The DMA only sees physical addresses!
+ */
+
+ int dma_ch0;
+ struct omap_dma_channel_params ch0_parameters;
+ u32 nLengthLoop = 0;
+ u32 nAlgoConstant;
+ struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
+
+ dprintk(KERN_INFO
+ "static_Hash_HwPerformDmaDigest: Buffer=0x%08x/%u\n",
+ (u32)pData, (u32)nDataLength);
+
+ /*lock the DMA */
+ mutex_lock(&pDevice->sm.sDMALock);
+ if (scxPublicDMARequest(&dma_ch0) != PUBLIC_CRYPTO_OPERATION_SUCCESS) {
+ mutex_unlock(&pDevice->sm.sDMALock);
+ return;
+ }
+
+ while (nDataLength > 0) {
+
+ nAlgoConstant = 0;
+ if (nBytesProcessed == 0) {
+ /*No bytes processed so far. Will use the algo
+ *constant instead of previous digest */
+ nAlgoConstant = 1 << 3;
+ }
+
+ /*check length */
+ if (nDataLength <= pDevice->nDMABufferLength)
+ nLengthLoop = nDataLength;
+ else
+ nLengthLoop = pDevice->nDMABufferLength;
+
+ /*
+ *Copy the data from the input buffer into a preallocated
+ *buffer which is aligned on the beginning of a page.
+ *This may prevent potential issues when flushing/invalidating
+ *the buffer as the cache lines are 64 bytes long.
+ */
+ memcpy(pDevice->pDMABuffer, pData, nLengthLoop);
+
+ /*DMA1: Mem -> HASH */
+ scxPublicSetDMAChannelCommonParams(&ch0_parameters,
+ nLengthLoop / HASH_BLOCK_BYTES_LENGTH,
+ DMA_CEN_Elts_per_Frame_SHA,
+ DIGEST1_REGS_HW_ADDR + 0x80,
+ pDevice->pDMABufferPhys,
+ OMAP44XX_DMA_SHA2_DIN_P);
+
+ /*specific for Mem -> HWA */
+ ch0_parameters.src_amode = OMAP_DMA_AMODE_POST_INC;
+ ch0_parameters.dst_amode = OMAP_DMA_AMODE_CONSTANT;
+ ch0_parameters.src_or_dst_synch = OMAP_DMA_DST_SYNC;
+
+ scxPublicDMASetParams(dma_ch0, &ch0_parameters);
+
+ omap_set_dma_src_burst_mode(dma_ch0, OMAP_DMA_DATA_BURST_16);
+ omap_set_dma_dest_burst_mode(dma_ch0, OMAP_DMA_DATA_BURST_16);
+
+ OUTREG32(&pSha1Md5Reg_t->DIGEST_COUNT, nBytesProcessed);
+ OUTREG32(&pSha1Md5Reg_t->MODE,
+ nAlgoConstant | (nAlgo & 0x6));
+
+ /*
+ * Triggers operation
+ * Interrupt, Free Running + GO (DMA on)
+ */
+ OUTREG32(&pSha1Md5Reg_t->SYSCONFIG,
+ INREG32(&pSha1Md5Reg_t->SYSCONFIG) |
+ DIGEST_SYSCONFIG_PDMA_EN_BIT);
+ OUTREG32(&pSha1Md5Reg_t->LENGTH, nLengthLoop);
+
+ wmb();
+
+ scxPublicDMAStart(dma_ch0, OMAP_DMA_BLOCK_IRQ);
+
+ scxPublicDMAWait(1);
+
+ OUTREG32(&pSha1Md5Reg_t->SYSCONFIG, 0);
+
+ scxPublicDMAClearChannel(dma_ch0);
+
+ pData += nLengthLoop;
+ nDataLength -= nLengthLoop;
+ nBytesProcessed =
+ INREG32(&pSha1Md5Reg_t->DIGEST_COUNT);
+ }
+
+ /*For safety reasons, let's clean the working buffer */
+ memset(pDevice->pDMABuffer, 0, nLengthLoop);
+
+ /*release the DMA */
+ scxPublicDMARelease(dma_ch0);
+
+ mutex_unlock(&pDevice->sm.sDMALock);
+
+ /*
+	 * The DMA transfer is complete; now wait until the hash
+	 * operation has finished.
+ */
+ SCXPublicCryptoWaitForReadyBitInfinitely(
+ (u32 *)&pSha1Md5Reg_t->IRQSTATUS,
+ DIGEST_IRQSTATUS_CONTEXT_READY_BIT);
+}
+
+/*------------------------------------------------------------------------- */
+/*
+ *Static function: performs the data digest using the DMA for data transfer.
+ *
+ *inputs:
+ *      pData : pointer to the input data to process
+ *      dataLength : number of bytes to process
+ */
+static void PDrvCryptoUpdateHashWithDMA(
+ struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *pSHAState,
+ u8 *pData, u32 dataLength)
+{
+ dprintk(KERN_INFO "PDrvCryptoUpdateHashWithDMA\n");
+
+ if (pSHAState->nChunkLength != 0) {
+
+ u32 vLengthToComplete;
+
+ /*Fill the chunk first */
+ if (pSHAState->
+ nChunkLength + dataLength <= HASH_BLOCK_BYTES_LENGTH) {
+
+ /*So we fill the chunk buffer with the new data */
+ memcpy(pSHAState->
+ pChunkBuffer + pSHAState->nChunkLength,
+ pData, dataLength);
+ pSHAState->nChunkLength += dataLength;
+
+ /*We'll keep some data for the final */
+ return;
+ }
+
+ vLengthToComplete = HASH_BLOCK_BYTES_LENGTH - pSHAState->
+ nChunkLength;
+
+ if (vLengthToComplete != 0) {
+ /*So we fill the chunk buffer with the new data to
+ *complete to 64B */
+ memcpy(pSHAState->pChunkBuffer + pSHAState->
+ nChunkLength, pData, vLengthToComplete);
+ }
+
+ /*Then we send this buffer to the HWA (no DMA) */
+ static_Hash_HwPerform64bDigest(
+ (u32 *)pSHAState->pChunkBuffer, pSHAState->CTRL,
+ pSHAState->nBytesProcessed);
+
+ pSHAState->nBytesProcessed =
+ INREG32(&pSha1Md5Reg_t->DIGEST_COUNT);
+
+ /*We have flushed the chunk so it is empty now */
+ pSHAState->nChunkLength = 0;
+
+		/*Advance past the data that has already been
+		 *processed */
+ pData += vLengthToComplete;
+ dataLength -= vLengthToComplete;
+ }
+
+ if (dataLength > HASH_BLOCK_BYTES_LENGTH) {
+
+		/*The DMA only handles multiples of 64 bytes */
+ u32 vDmaProcessSize = dataLength & 0xFFFFFFC0;
+
+ if (vDmaProcessSize == dataLength) {
+ /*We keep one block for the final */
+ vDmaProcessSize -= HASH_BLOCK_BYTES_LENGTH;
+ }
+
+ static_Hash_HwPerformDmaDigest(pData, vDmaProcessSize,
+ pSHAState->CTRL, pSHAState->nBytesProcessed);
+
+ pSHAState->nBytesProcessed =
+ INREG32(&pSha1Md5Reg_t->DIGEST_COUNT);
+ pData += vDmaProcessSize;
+ dataLength -= vDmaProcessSize;
+ }
+
+	/*At this point, between 1 and 64 bytes are left to process*/
+	if ((dataLength == 0) || (dataLength > HASH_BLOCK_BYTES_LENGTH)) {
+		/*Should never happen */
+		panic("PDrvCryptoUpdateHashWithDMA: "
+			"remaining dataLength=%u\n", dataLength);
+ }
+
+ /*We now fill the chunk buffer with the remaining data */
+ memcpy(pSHAState->pChunkBuffer, pData, dataLength);
+ pSHAState->nChunkLength = dataLength;
+}
+
+#ifdef CONFIG_SMC_KERNEL_CRYPTO
+static void PDrvCryptoInitHash(u32 alg,
+ struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state)
+{
+ memset(state, 0, sizeof(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE));
+
+ state->CTRL = alg << 1;
+}
+
+static int static_Hash_HwReadDigest(u32 algo, u8 *out)
+{
+ u32 regs, tmp;
+ u32 idx = 0, i;
+
+ switch (algo) {
+ case DIGEST_CTRL_ALGO_MD5:
+ regs = 4;
+ break;
+ case DIGEST_CTRL_ALGO_SHA1:
+ regs = 5;
+ break;
+ case DIGEST_CTRL_ALGO_SHA224:
+ regs = 7;
+ break;
+ case DIGEST_CTRL_ALGO_SHA256:
+ regs = 8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < regs; i++) {
+ tmp = INREG32(&pSha1Md5Reg_t->IDIGEST_A + i);
+
+ out[idx++] = (u8) ((tmp >> 0) & 0xff);
+ out[idx++] = (u8) ((tmp >> 8) & 0xff);
+ out[idx++] = (u8) ((tmp >> 16) & 0xff);
+ out[idx++] = (u8) ((tmp >> 24) & 0xff);
+ }
+
+ return 0;
+}
+
+static int PDrvCryptoFinalHash(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state,
+ u8 *out)
+{
+ u32 *data = (u32 *) state->pChunkBuffer;
+
+ /* Hashing an empty string? */
+ if (state->nBytesProcessed + state->nChunkLength == 0) {
+ switch (DIGEST_MODE_GET_ALGO(state->CTRL)) {
+ case DIGEST_CTRL_ALGO_MD5:
+ memcpy(out, md5OverEmptyString, HASH_MD5_LENGTH);
+ break;
+ case DIGEST_CTRL_ALGO_SHA1:
+ memcpy(out, sha1OverEmptyString, HASH_SHA1_LENGTH);
+ break;
+ case DIGEST_CTRL_ALGO_SHA224:
+ memcpy(out, sha224OverEmptyString, HASH_SHA224_LENGTH);
+ break;
+ case DIGEST_CTRL_ALGO_SHA256:
+ memcpy(out, sha256OverEmptyString, HASH_SHA256_LENGTH);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ PDrvCryptoRestoreHashRegisters(state);
+
+ /*
+ * At this point, the chunk buffer should contain the last block of data
+ * needed for the final.
+ */
+ OUTREG32(&pSha1Md5Reg_t->DIGEST_COUNT, state->nBytesProcessed);
+ OUTREG32(&pSha1Md5Reg_t->MODE,
+ (state->CTRL & 0x6) | 0x10 |
+ (state->nBytesProcessed == 0) << 3);
+ OUTREG32(&pSha1Md5Reg_t->LENGTH, state->nChunkLength);
+
+ if (SCXPublicCryptoWaitForReadyBit(
+ (u32 *) &pSha1Md5Reg_t->IRQSTATUS,
+ DIGEST_IRQSTATUS_INPUT_READY_BIT)
+ != PUBLIC_CRYPTO_OPERATION_SUCCESS) {
+ /* Crash the system as this should never occur */
+		panic("Waited too long for the DIGEST HW accelerator "
+			"input data to be ready\n");
+ }
+
+ OUTREG32(&pSha1Md5Reg_t->DIN_0, data[0]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_1, data[1]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_2, data[2]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_3, data[3]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_4, data[4]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_5, data[5]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_6, data[6]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_7, data[7]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_8, data[8]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_9, data[9]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_10, data[10]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_11, data[11]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_12, data[12]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_13, data[13]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_14, data[14]);
+ OUTREG32(&pSha1Md5Reg_t->DIN_15, data[15]);
+
+ /* Wait till the hash operation is finished */
+ SCXPublicCryptoWaitForReadyBitInfinitely(
+ (u32 *) &pSha1Md5Reg_t->IRQSTATUS,
+ DIGEST_IRQSTATUS_OUTPUT_READY_BIT);
+
+ return static_Hash_HwReadDigest(DIGEST_MODE_GET_ALGO(state->CTRL), out);
+}
+
+/*
+ * Digest HWA registration into kernel crypto framework
+ */
+
+static int digest_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state = shash_desc_ctx(desc);
+
+ PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_SHA, LOCK_HWA);
+
+ SCXPublicCryptoEnableClock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG);
+
+ PDrvCryptoUpdateHash(state, (u8 *) data, len);
+
+ SCXPublicCryptoDisableClock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG);
+
+ PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_SHA, UNLOCK_HWA);
+
+ return 0;
+}
+
+static int digest_final(struct shash_desc *desc, u8 *out)
+{
+ int ret;
+ struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state = shash_desc_ctx(desc);
+
+ PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_SHA, LOCK_HWA);
+
+ SCXPublicCryptoEnableClock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG);
+
+ ret = PDrvCryptoFinalHash(state, out);
+
+ SCXPublicCryptoDisableClock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG);
+
+ PDrvCryptoLockUnlockHWA(PUBLIC_CRYPTO_HWA_SHA, UNLOCK_HWA);
+
+ return ret;
+}
+
+static int digest_import(struct shash_desc *desc, const void *in)
+{
+ struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state = shash_desc_ctx(desc);
+
+ memcpy(state, in, sizeof(*state));
+ return 0;
+}
+
+static int digest_export(struct shash_desc *desc, void *out)
+{
+ struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state = shash_desc_ctx(desc);
+
+ memcpy(out, state, sizeof(*state));
+ return 0;
+}
+
+/* MD5 */
+static int md5_init(struct shash_desc *desc)
+{
+ struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state = shash_desc_ctx(desc);
+
+ PDrvCryptoInitHash(DIGEST_CTRL_ALGO_MD5, state);
+
+ return 0;
+}
+
+static struct shash_alg smc_md5_alg = {
+ .digestsize = HASH_MD5_LENGTH,
+ .init = md5_init,
+ .update = digest_update,
+ .final = digest_final,
+ .export = digest_export,
+ .import = digest_import,
+ .descsize = sizeof(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE),
+ .statesize = sizeof(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE),
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name = "md5-smc",
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_priority = 999,
+ .cra_blocksize = HASH_BLOCK_BYTES_LENGTH,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+/* SHA1 */
+static int sha1_init(struct shash_desc *desc)
+{
+ struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state = shash_desc_ctx(desc);
+
+ PDrvCryptoInitHash(DIGEST_CTRL_ALGO_SHA1, state);
+
+ return 0;
+}
+
+static struct shash_alg smc_sha1_alg = {
+ .digestsize = HASH_SHA1_LENGTH,
+ .init = sha1_init,
+ .update = digest_update,
+ .final = digest_final,
+ .export = digest_export,
+ .import = digest_import,
+ .descsize = sizeof(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE),
+ .statesize = sizeof(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-smc",
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_priority = 999,
+ .cra_blocksize = HASH_BLOCK_BYTES_LENGTH,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+/* SHA224 */
+static int sha224_init(struct shash_desc *desc)
+{
+ struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state = shash_desc_ctx(desc);
+
+ PDrvCryptoInitHash(DIGEST_CTRL_ALGO_SHA224, state);
+
+ return 0;
+}
+
+static struct shash_alg smc_sha224_alg = {
+ .digestsize = HASH_SHA224_LENGTH,
+ .init = sha224_init,
+ .update = digest_update,
+ .final = digest_final,
+ .export = digest_export,
+ .import = digest_import,
+ .descsize = sizeof(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE),
+ .statesize = sizeof(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE),
+ .base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "sha224-smc",
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_priority = 999,
+ .cra_blocksize = HASH_BLOCK_BYTES_LENGTH,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+/* SHA256 */
+static int sha256_init(struct shash_desc *desc)
+{
+ struct PUBLIC_CRYPTO_SHA_OPERATION_STATE *state = shash_desc_ctx(desc);
+
+ PDrvCryptoInitHash(DIGEST_CTRL_ALGO_SHA256, state);
+
+ return 0;
+}
+
+static struct shash_alg smc_sha256_alg = {
+ .digestsize = HASH_SHA256_LENGTH,
+ .init = sha256_init,
+ .update = digest_update,
+ .final = digest_final,
+ .export = digest_export,
+ .import = digest_import,
+ .descsize = sizeof(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE),
+ .statesize = sizeof(struct PUBLIC_CRYPTO_SHA_OPERATION_STATE),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-smc",
+ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_priority = 999,
+ .cra_blocksize = HASH_BLOCK_BYTES_LENGTH,
+ .cra_module = THIS_MODULE,
+ }
+};
+
+int register_smc_public_crypto_digest(void)
+{
+ int ret;
+
+ dprintk(KERN_INFO "SMC: Registering digest algorithms\n");
+
+ ret = crypto_register_shash(&smc_md5_alg);
+ if (ret)
+ return ret;
+
+ ret = crypto_register_shash(&smc_sha1_alg);
+ if (ret)
+ goto sha1_err;
+
+ ret = crypto_register_shash(&smc_sha224_alg);
+ if (ret)
+ goto sha224_err;
+
+ ret = crypto_register_shash(&smc_sha256_alg);
+ if (ret)
+ goto sha256_err;
+
+ return 0;
+
+sha256_err:
+ crypto_unregister_shash(&smc_sha224_alg);
+sha224_err:
+ crypto_unregister_shash(&smc_sha1_alg);
+sha1_err:
+ crypto_unregister_shash(&smc_md5_alg);
+ return ret;
+}
+
+void unregister_smc_public_crypto_digest(void)
+{
+ dprintk(KERN_INFO "SMC: Unregistering digest algorithms\n");
+
+ crypto_unregister_shash(&smc_md5_alg);
+ crypto_unregister_shash(&smc_sha1_alg);
+ crypto_unregister_shash(&smc_sha224_alg);
+ crypto_unregister_shash(&smc_sha256_alg);
+}
+#endif
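
For reference, here is a minimal, illustrative sketch — not from this patch — of how kernel code could exercise one of the hashes registered above through the generic shash interface; the function and buffer names are placeholders, and it assumes "sha256" resolves to sha256-smc because of its cra_priority of 999.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <crypto/hash.h>

/* Computes the SHA-256 digest of data[0..len) into out (32 bytes). */
static int smc_sha256_example(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int ret;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (desc == NULL) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}

	desc->tfm = tfm;
	desc->flags = 0;

	/* One-shot digest: ends up in sha256_init/digest_update/digest_final */
	ret = crypto_shash_digest(desc, data, len, out);

	kfree(desc);
	crypto_free_shash(tfm);
	return ret;
}
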
diff --git a/security/smc/omap4/scx_public_dma.c b/security/smc/omap4/scx_public_dma.c
new file mode 100644
index 0000000..743c333
--- /dev/null
+++ b/security/smc/omap4/scx_public_dma.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2006-2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "scxlnx_defs.h"
+#include "scxlnx_util.h"
+#include "scx_public_dma.h"
+
+#include <asm/atomic.h>
+
+static atomic_t g_dmaEventFlag = ATOMIC_INIT(0);
+
+/*------------------------------------------------------------------------ */
+/*
+ * Internal functions
+ */
+
+static void scxPublicDMACallback(int lch, u16 ch_status, void *data)
+{
+ atomic_inc(&g_dmaEventFlag);
+}
+
+/*------------------------------------------------------------------------ */
+/*
+ * Public DMA API
+ */
+
+u32 scxPublicDMARequest(int *lch)
+{
+ int dma_ch_out = 0;
+
+ if (lch == NULL)
+ return PUBLIC_CRYPTO_ERR_BAD_PARAMETERS;
+
+ if (omap_request_dma(0, "SMC Public Crypto",
+ scxPublicDMACallback, NULL, &dma_ch_out) != 0)
+ return PUBLIC_CRYPTO_ERR_OUT_OF_MEMORY;
+
+ omap_disable_dma_irq(dma_ch_out, OMAP_DMA_DROP_IRQ |
+ OMAP_DMA_BLOCK_IRQ);
+
+ *lch = dma_ch_out;
+
+ return PUBLIC_CRYPTO_OPERATION_SUCCESS;
+}
+
+/*------------------------------------------------------------------------ */
+/*
+ * Release a DMA channel
+ */
+u32 scxPublicDMARelease(int lch)
+{
+ omap_free_dma(lch);
+
+ return PUBLIC_CRYPTO_OPERATION_SUCCESS;
+}
+
+/*------------------------------------------------------------------------ */
+
+void scxPublicDMASetParams(int lch, struct omap_dma_channel_params *pParams)
+{
+ omap_set_dma_params(lch, pParams);
+}
+
+/*------------------------------------------------------------------------ */
+
+void scxPublicDMAStart(int lch, int interruptMask)
+{
+ atomic_set(&g_dmaEventFlag, 0);
+ omap_enable_dma_irq(lch, interruptMask);
+ omap_start_dma(lch);
+}
+
+/*------------------------------------------------------------------------ */
+
+void scxPublicDMADisableChannel(int lch)
+{
+ omap_stop_dma(lch);
+}
+
+/*------------------------------------------------------------------------ */
+
+void scxPublicDMAClearChannel(int lch)
+{
+ omap_clear_dma(lch);
+}
+
+/*------------------------------------------------------------------------ */
+
+void scxPublicDMAWait(int nr_of_cb)
+{
+ while (atomic_read(&g_dmaEventFlag) < nr_of_cb)
+ cpu_relax();
+}
+
+/*------------------------------------------------------------------------ */
+/*
+ * Perform common DMA channel setup, used to factor out duplicated code
+ *
+ * Output: struct omap_dma_channel_params *pDMAChannel
+ * Inputs: u32 nbBlocks Number of blocks (frames) in the transfer
+ * u32 nbElements Number of elements per frame
+ * u32 nDstStart Destination address
+ * u32 nSrcStart Source address
+ * u32 nTriggerID Trigger ID
+ */
+void scxPublicSetDMAChannelCommonParams(
+ struct omap_dma_channel_params *pDMAChannel,
+ u32 nbBlocks, u32 nbElements,
+ u32 nDstStart, u32 nSrcStart, u32 nTriggerID)
+{
+ pDMAChannel->data_type = OMAP_DMA_DATA_TYPE_S32;
+ pDMAChannel->elem_count = nbElements;
+ pDMAChannel->frame_count = nbBlocks;
+ pDMAChannel->src_ei = 0;
+ pDMAChannel->src_fi = 0;
+ pDMAChannel->dst_ei = 0;
+ pDMAChannel->dst_fi = 0;
+ pDMAChannel->sync_mode = OMAP_DMA_SYNC_FRAME;
+ pDMAChannel->src_start = nSrcStart;
+ pDMAChannel->dst_start = nDstStart;
+ pDMAChannel->trigger = nTriggerID;
+}
diff --git a/security/smc/omap4/scx_public_dma.h b/security/smc/omap4/scx_public_dma.h
new file mode 100644
index 0000000..ddd19b2
--- /dev/null
+++ b/security/smc/omap4/scx_public_dma.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2006-2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __SCX_PUBLIC_DMA_H
+#define __SCX_PUBLIC_DMA_H
+
+#include <linux/dma-mapping.h>
+#include <plat/dma.h>
+#include <plat/dma-44xx.h>
+
+#include "scx_public_crypto.h"
+
+/*---------------------------------------------------------------------------
+ * Cache management (implemented in the assembler file)
+ *-------------------------------------------------------------------------- */
+
+u32 v7_dma_flush_range(u32 nVAStart, u32 nVAEnd);
+u32 v7_dma_inv_range(u32 nVAStart, u32 nVAEnd);
+
+/*-------------------------------------------------------------------------- */
+/*
+ * Public DMA API
+ */
+
+/*
+ * CEN Masks
+ */
+#define DMA_CEN_Elts_per_Frame_AES 4
+#define DMA_CEN_Elts_per_Frame_DES 2
+#define DMA_CEN_Elts_per_Frame_SHA 16
+
+/*
+ * Request a DMA channel
+ */
+u32 scxPublicDMARequest(int *lch);
+
+/*
+ * Release a DMA channel
+ */
+u32 scxPublicDMARelease(int lch);
+
+/**
+ * This function waits for the DMA IRQ.
+ */
+void scxPublicDMAWait(int nr_of_cb);
+
+/*
+ * This function starts a DMA operation.
+ *
+ * lch DMA channel ID.
+ * interruptMask Configures the Channel Interrupt Control Register.
+ */
+void scxPublicDMAStart(int lch, int interruptMask);
+
+void scxPublicSetDMAChannelCommonParams(
+ struct omap_dma_channel_params *pDMAChannel,
+ u32 nbBlocks, u32 nbElements, u32 nDstStart,
+ u32 nSrcStart, u32 nTriggerID);
+void scxPublicDMASetParams(int lch, struct omap_dma_channel_params *pParams);
+void scxPublicDMADisableChannel(int lch);
+void scxPublicDMAClearChannel(int lch);
+
+#endif /*__SCX_PUBLIC_DMA_H */
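
To summarise the calling convention of this API, here is a minimal sketch of a single memory-to-AES transfer, mirroring how the AES and digest paths earlier in this patch drive it. The function name, source address, destination register address and block count below are illustrative placeholders, not part of the patch.

#include "scx_public_dma.h"

/*
 * Sketch only: src_phys must be the physical address of a contiguous,
 * cache-clean buffer, dst_reg_phys the physical address of the HWA data
 * input register (the AES path uses AES1_REGS_HW_ADDR + 0x60), and
 * nb_blocks the number of AES blocks to feed in. Callers in this patch
 * additionally serialize DMA use with pDevice->sm.sDMALock.
 */
static u32 example_mem_to_aes_transfer(u32 src_phys, u32 dst_reg_phys,
	u32 nb_blocks)
{
	int lch;
	struct omap_dma_channel_params params;

	if (scxPublicDMARequest(&lch) != PUBLIC_CRYPTO_OPERATION_SUCCESS)
		return PUBLIC_CRYPTO_ERR_OUT_OF_MEMORY;

	scxPublicSetDMAChannelCommonParams(&params, nb_blocks,
		DMA_CEN_Elts_per_Frame_AES, dst_reg_phys, src_phys,
		OMAP44XX_DMA_AES1_P_DATA_IN_REQ);

	/* Memory -> HWA: increment the source, keep the destination fixed */
	params.src_amode = OMAP_DMA_AMODE_POST_INC;
	params.dst_amode = OMAP_DMA_AMODE_CONSTANT;
	params.src_or_dst_synch = OMAP_DMA_DST_SYNC;

	scxPublicDMASetParams(lch, &params);

	scxPublicDMAStart(lch, OMAP_DMA_BLOCK_IRQ);
	scxPublicDMAWait(1);	/* one DMA completion callback expected */

	scxPublicDMAClearChannel(lch);
	scxPublicDMARelease(lch);

	return PUBLIC_CRYPTO_OPERATION_SUCCESS;
}
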
diff --git a/security/smc/omap4/scxlnx_comm_mshield.c b/security/smc/omap4/scxlnx_comm_mshield.c
new file mode 100644
index 0000000..ccd2098
--- /dev/null
+++ b/security/smc/omap4/scxlnx_comm_mshield.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <asm/div64.h>
+#include <asm/system.h>
+#include <asm/cputype.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/page-flags.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+#include <linux/version.h>
+#include <linux/jiffies.h>
+#include <linux/dma-mapping.h>
+#include <linux/cpu.h>
+
+#include <asm/cacheflush.h>
+
+#include <clockdomain.h>
+
+#include "scxlnx_defs.h"
+
+#ifdef CONFIG_HAS_WAKELOCK
+static struct wake_lock g_tf_wake_lock;
+static atomic_t tf_wake_lock_count = ATOMIC_INIT(0);
+#endif
+
+static struct clockdomain *smc_l4_sec_clkdm;
+static atomic_t smc_l4_sec_clkdm_use_count = ATOMIC_INIT(0);
+
+static int __init tf_early_init(void)
+{
+ smc_l4_sec_clkdm = clkdm_lookup("l4_secure_clkdm");
+ if (smc_l4_sec_clkdm == NULL)
+ return -EFAULT;
+
+#ifdef CONFIG_HAS_WAKELOCK
+ wake_lock_init(&g_tf_wake_lock, WAKE_LOCK_SUSPEND,
+ SCXLNX_DEVICE_BASE_NAME);
+#endif
+
+ return 0;
+}
+early_initcall(tf_early_init);
+
+/*--------------------------------------------------------------------------
+ * L4 SEC Clock domain handling
+ *-------------------------------------------------------------------------- */
+
+void tf_l4sec_clkdm_wakeup(bool use_spin_lock, bool wakelock)
+{
+ if (use_spin_lock)
+ spin_lock(&SCXLNXGetDevice()->sm.lock);
+#ifdef CONFIG_HAS_WAKELOCK
+ if (wakelock) {
+ atomic_inc(&tf_wake_lock_count);
+ wake_lock(&g_tf_wake_lock);
+ }
+#endif
+ atomic_inc(&smc_l4_sec_clkdm_use_count);
+ clkdm_wakeup(smc_l4_sec_clkdm);
+ if (use_spin_lock)
+ spin_unlock(&SCXLNXGetDevice()->sm.lock);
+}
+
+void tf_l4sec_clkdm_allow_idle(bool use_spin_lock, bool wakeunlock)
+{
+ if (use_spin_lock)
+ spin_lock(&SCXLNXGetDevice()->sm.lock);
+ if (atomic_dec_return(&smc_l4_sec_clkdm_use_count) == 0)
+ clkdm_allow_idle(smc_l4_sec_clkdm);
+#ifdef CONFIG_HAS_WAKELOCK
+ if (wakeunlock)
+ if (atomic_dec_return(&tf_wake_lock_count) == 0)
+ wake_unlock(&g_tf_wake_lock);
+#endif
+ if (use_spin_lock)
+ spin_unlock(&SCXLNXGetDevice()->sm.lock);
+}
+
diff --git a/security/smc/omap4/scxlnx_defs.h b/security/smc/omap4/scxlnx_defs.h
new file mode 100644
index 0000000..a6dcb9c
--- /dev/null
+++ b/security/smc/omap4/scxlnx_defs.h
@@ -0,0 +1,539 @@
+/*
+ * Copyright (c) 2006-2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __SCXLNX_DEFS_H__
+#define __SCXLNX_DEFS_H__
+
+#include <asm/atomic.h>
+#include <linux/version.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/sysdev.h>
+#include <linux/sysfs.h>
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+#ifdef CONFIG_HAS_WAKELOCK
+#include <linux/wakelock.h>
+#endif
+
+#include "scx_protocol.h"
+
+/*----------------------------------------------------------------------------*/
+
+#define SIZE_1KB 0x400
+
+/*
+ * Maximum number of shared memory blocks that can be registered in a connection
+ */
+#define SCXLNX_SHMEM_MAX_COUNT (64)
+
+/*
+ * Describes the possible types of shared memories
+ *
+ * SCXLNX_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM :
+ * The descriptor describes a registered shared memory.
+ * Its coarse pages are preallocated when initializing the
+ * connection
+ * SCXLNX_SHMEM_TYPE_REGISTERED_SHMEM :
+ * The descriptor describes a registered shared memory.
+ * Its coarse pages are not preallocated
+ * SCXLNX_SHMEM_TYPE_PM_HIBERNATE :
+ * The descriptor describes a power management shared memory.
+ */
+enum SCXLNX_SHMEM_TYPE {
+ SCXLNX_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM = 0,
+ SCXLNX_SHMEM_TYPE_REGISTERED_SHMEM,
+ SCXLNX_SHMEM_TYPE_PM_HIBERNATE,
+};
+
+
+/*
+ * This structure contains a pointer on a coarse page table
+ */
+struct SCXLNX_COARSE_PAGE_TABLE {
+ /*
+ * Identifies the coarse page table descriptor in
+ * sFreeCoarsePageTables list
+ */
+ struct list_head list;
+
+ /*
+ * The address of the coarse page table
+ */
+ u32 *pDescriptors;
+
+ /*
+ * The address of the array containing this coarse page table
+ */
+ struct SCXLNX_COARSE_PAGE_TABLE_ARRAY *pParent;
+};
+
+
+#define SCXLNX_PAGE_DESCRIPTOR_TYPE_NORMAL 0
+#define SCXLNX_PAGE_DESCRIPTOR_TYPE_PREALLOCATED 1
+
+/*
+ * This structure describes an array of up to 4 coarse page tables
+ * allocated within a single 4KB page.
+ */
+struct SCXLNX_COARSE_PAGE_TABLE_ARRAY {
+ /*
+ * identifies the element in the sCoarsePageTableArrays list
+ */
+ struct list_head list;
+
+ /*
+ * Type of page descriptor
+ * can take any of the SCXLNX_PAGE_DESCRIPTOR_TYPE_XXX values
+ */
+ u32 nType;
+
+ struct SCXLNX_COARSE_PAGE_TABLE sCoarsePageTables[4];
+
+ /*
+ * A counter of the number of coarse page tables currently in use;
+ * the maximum value is 4 (one coarse page table is 1KB while one
+ * page is 4KB)
+ */
+ u8 nReferenceCount;
+};
+
+
+/*
+ * This structure describes a list of coarse page table arrays
+ * with some of the coarse page tables free. It is used
+ * when the driver needs to allocate a new coarse page
+ * table.
+ */
+struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT {
+ /*
+ * The spin lock protecting concurrent access to the structure.
+ */
+ spinlock_t lock;
+
+ /*
+ * The list of allocated coarse page table arrays
+ */
+ struct list_head sCoarsePageTableArrays;
+
+ /*
+ * The list of free coarse page tables
+ */
+ struct list_head sFreeCoarsePageTables;
+};
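+
+/*
+ * Allocation sketch (hypothetical helper, kept compiled out, shown only to
+ * illustrate how the free list is intended to be used): a coarse page table
+ * is taken from sFreeCoarsePageTables under the lock; when the list is
+ * empty, a new 4KB page has to be allocated and split into four 1KB tables.
+ */
+#if 0
+static struct SCXLNX_COARSE_PAGE_TABLE *example_get_coarse_page_table(
+ struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT *pCtx)
+{
+ struct SCXLNX_COARSE_PAGE_TABLE *pTable = NULL;
+
+ spin_lock(&pCtx->lock);
+ if (!list_empty(&pCtx->sFreeCoarsePageTables)) {
+ pTable = list_first_entry(&pCtx->sFreeCoarsePageTables,
+ struct SCXLNX_COARSE_PAGE_TABLE, list);
+ list_del(&pTable->list);
+ pTable->pParent->nReferenceCount++;
+ }
+ spin_unlock(&pCtx->lock);
+
+ return pTable;
+}
+#endif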
+
+
+/*
+ * Fully describes a shared memory block
+ */
+struct SCXLNX_SHMEM_DESC {
+ /*
+ * Identifies the shared memory descriptor in the list of free shared
+ * memory descriptors
+ */
+ struct list_head list;
+
+ /*
+ * Identifies the type of shared memory descriptor
+ */
+ enum SCXLNX_SHMEM_TYPE nType;
+
+ /*
+ * The identifier of the block of shared memory, as returned by the
+ * Secure World.
+ * This identifier is hBlock field of a REGISTER_SHARED_MEMORY answer
+ */
+ u32 hIdentifier;
+
+ /* Client buffer */
+ u8 *pBuffer;
+
+ /* Up to eight coarse page table contexts */
+ struct SCXLNX_COARSE_PAGE_TABLE *pCoarsePageTable[SCX_MAX_COARSE_PAGES];
+
+ u32 nNumberOfCoarsePageTables;
+
+ /* Reference counter */
+ atomic_t nRefCnt;
+};
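+
+/*
+ * Sizing note (assuming standard ARM coarse page tables: 256 entries of
+ * 4KB each, i.e. 1MB of address space per table): a client buffer of
+ * nLength bytes starting at offset nOffset within its first 1MB section
+ * (names used only for this note) spans
+ *
+ *   nNumberOfCoarsePageTables = (nOffset + nLength + 1MB - 1) / 1MB
+ *
+ * coarse page tables, which cannot exceed SCX_MAX_COARSE_PAGES.
+ */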
+
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ * This structure describes the communication with the Secure World
+ *
+ * Note that this driver supports only one instance of the Secure World
+ */
+struct SCXLNX_COMM {
+ /*
+ * The spin lock protecting concurrent access to the structure.
+ */
+ spinlock_t lock;
+
+ /*
+ * Bit vector with the following possible flags:
+ * - SCXLNX_COMM_FLAG_IRQ_REQUESTED: If set, indicates that
+ * the IRQ has been successfully requested.
+ * - SCXLNX_COMM_FLAG_TERMINATING: If set, indicates that the
+ * communication with the Secure World is being terminated.
+ * Transmissions to the Secure World are not permitted
+ * - SCXLNX_COMM_FLAG_W3B_ALLOCATED: If set, indicates that the
+ * W3B buffer has been allocated.
+ *
+ * This bit vector must be accessed with the kernel's atomic bitwise
+ * operations.
+ */
+ unsigned long nFlags;
+
+ /*
+ * The virtual address of the L1 shared buffer.
+ */
+ struct SCHANNEL_C1S_BUFFER *pBuffer;
+
+ /*
+ * The wait queue the client threads are waiting on.
+ */
+ wait_queue_head_t waitQueue;
+
+#ifdef CONFIG_TF_TRUSTZONE
+ /*
+ * The interrupt line used by the Secure World.
+ */
+ int nSoftIntIrq;
+
+ /* ----- W3B ----- */
+ /* shared memory descriptor to identify the W3B */
+ struct SCXLNX_SHMEM_DESC sW3BShmemDesc;
+
+ /* Virtual address of the kernel allocated shared memory */
+ u32 nW3BShmemVAddr;
+
+ /* offset of data in shared memory coarse pages */
+ u32 nW3BShmemOffset;
+
+ u32 nW3BShmemSize;
+
+ struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT
+ sW3BAllocationContext;
+#endif
+#ifdef CONFIG_TF_MSHIELD
+ /*
+ * The SE SDP can only be initialized once...
+ */
+ int bSEInitialized;
+
+ /* Virtual address of the L0 communication buffer */
+ void *pInitSharedBuffer;
+
+ /*
+ * Lock to be held by a client when executing an RPC
+ */
+ struct mutex sRPCLock;
+
+ /*
+ * Lock to protect concurrent accesses to DMA channels
+ */
+ struct mutex sDMALock;
+#endif
+};
+
+
+#define SCXLNX_COMM_FLAG_IRQ_REQUESTED (0)
+#define SCXLNX_COMM_FLAG_PA_AVAILABLE (1)
+#define SCXLNX_COMM_FLAG_TERMINATING (2)
+#define SCXLNX_COMM_FLAG_W3B_ALLOCATED (3)
+#define SCXLNX_COMM_FLAG_L1_SHARED_ALLOCATED (4)
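+
+/*
+ * The nFlags bit vector is manipulated with the kernel's atomic bit
+ * operations, e.g. (illustrative only):
+ *
+ *   set_bit(SCXLNX_COMM_FLAG_IRQ_REQUESTED, &pComm->nFlags);
+ *   terminating = test_bit(SCXLNX_COMM_FLAG_TERMINATING, &pComm->nFlags);
+ */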
+
+/*----------------------------------------------------------------------------*/
+
+struct SCXLNX_DEVICE_STATS {
+ struct kobject kobj;
+
+ struct kobj_type kobj_type;
+
+ struct attribute kobj_stat_attribute;
+
+ struct attribute *kobj_attribute_list[2];
+
+ atomic_t stat_pages_allocated;
+ atomic_t stat_memories_allocated;
+ atomic_t stat_pages_locked;
+};
+
+/*
+ * This structure describes the information about one device handled by the
+ * driver. Note that the driver supports only a single device; see the global
+ * variable g_SCXLNXDevice.
+ */
+struct SCXLNX_DEVICE {
+ /*
+ * The device number for the device.
+ */
+ dev_t nDevNum;
+
+ /*
+ * Interfaces the system device with the kernel.
+ */
+ struct sys_device sysdev;
+
+ /*
+ * Interfaces the char device with the kernel.
+ */
+ struct cdev cdev;
+
+#ifdef CONFIG_TF_MSHIELD
+ struct cdev cdev_ctrl;
+
+ /*
+ * Globals for CUS
+ */
+ /* Current key handles loaded in HWAs */
+ u32 hAES1SecureKeyContext;
+ u32 hAES2SecureKeyContext;
+ u32 hDESSecureKeyContext;
+ bool bSHAM1IsPublic;
+
+ /* Semaphores used to serialize HWA accesses */
+ struct semaphore sAES1CriticalSection;
+ struct semaphore sAES2CriticalSection;
+ struct mutex sDESCriticalSection;
+ struct mutex sSHACriticalSection;
+
+ /*
+ * An aligned and correctly shaped pre-allocated buffer used for DMA
+ * transfers
+ */
+ u32 nDMABufferLength;
+ u8 *pDMABuffer;
+ dma_addr_t pDMABufferPhys;
+
+ /* Workspace allocated at boot time and reserved to the Secure World */
+ u32 nWorkspaceAddr;
+ u32 nWorkspaceSize;
+#endif
+
+ /*
+ * Communications with the SM.
+ */
+ struct SCXLNX_COMM sm;
+
+ /*
+ * Lists the connections attached to this device. A connection is
+ * created each time a user space application "opens" a file descriptor
+ * on the driver
+ */
+ struct list_head conns;
+
+ /*
+ * The spin lock used to protect concurrent access to the connection
+ * list.
+ */
+ spinlock_t connsLock;
+
+ struct SCXLNX_DEVICE_STATS sDeviceStats;
+
+ /*
+ * A mutex providing exclusive locking of the ioctl()
+ */
+ struct mutex dev_mutex;
+};
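+
+/*
+ * Serialization sketch (illustrative only): code driving the AES1 hardware
+ * accelerator is expected to bracket its accesses with the corresponding
+ * semaphore, e.g.
+ *
+ *   down(&pDevice->sAES1CriticalSection);
+ *   ... program the AES1 accelerator ...
+ *   up(&pDevice->sAES1CriticalSection);
+ *
+ * DES and SHA accesses use sDESCriticalSection and sSHACriticalSection
+ * with mutex_lock()/mutex_unlock() in the same way.
+ */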
+
+/* the bits of the nFlags field of the SCXLNX_DEVICE structure */
+#define SCXLNX_DEVICE_FLAG_CDEV_INITIALIZED (0)
+#define SCXLNX_DEVICE_FLAG_SYSDEV_CLASS_REGISTERED (1)
+#define SCXLNX_DEVICE_FLAG_SYSDEV_REGISTERED (2)
+#define SCXLNX_DEVICE_FLAG_CDEV_REGISTERED (3)
+#define SCXLNX_DEVICE_FLAG_CDEV_ADDED (4)
+#define SCXLNX_DEVICE_SYSFS_REGISTERED (5)
+
+/*----------------------------------------------------------------------------*/
+/*
+ * This type describes a connection state.
+ * This is used to determine whether a message is valid or not.
+ *
+ * Messages are only valid in a certain device state.
+ * Messages may be invalidated between the start of the ioctl call and the
+ * moment the message is sent to the Secure World.
+ *
+ * SCXLNX_CONN_STATE_NO_DEVICE_CONTEXT :
+ * The connection has no DEVICE_CONTEXT created and no
+ * CREATE_DEVICE_CONTEXT being processed by the Secure World
+ * SCXLNX_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT :
+ * The connection has a CREATE_DEVICE_CONTEXT being processed by the Secure
+ * World
+ * SCXLNX_CONN_STATE_VALID_DEVICE_CONTEXT :
+ * The connection has a DEVICE_CONTEXT created and no
+ * DESTROY_DEVICE_CONTEXT is being processed by the Secure World
+ * SCXLNX_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT :
+ * The connection has a DESTROY_DEVICE_CONTEXT being processed by the Secure
+ * World
+ */
+enum SCXLNX_CONN_STATE {
+ SCXLNX_CONN_STATE_NO_DEVICE_CONTEXT = 0,
+ SCXLNX_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT,
+ SCXLNX_CONN_STATE_VALID_DEVICE_CONTEXT,
+ SCXLNX_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT
+};
+
+
+/*
+ * This type describes the status of the command.
+ *
+ * PENDING:
+ * The initial state; the command has not been sent yet.
+ * SENT:
+ * The command has been sent, we are waiting for an answer.
+ * ABORTED:
+ * The command cannot be sent because the device context is invalid.
+ * Note that this only covers the case where some other thread
+ * sent a DESTROY_DEVICE_CONTEXT command.
+ */
+enum SCXLNX_COMMAND_STATE {
+ SCXLNX_COMMAND_STATE_PENDING = 0,
+ SCXLNX_COMMAND_STATE_SENT,
+ SCXLNX_COMMAND_STATE_ABORTED
+};
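+
+/*
+ * Typical transitions (informal sketch):
+ *
+ *   PENDING --(message handed to the Secure World)--> SENT
+ *   PENDING --(device context destroyed first)------> ABORTED
+ */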
+
+
+/*
+ * This structure describes a connection to the driver
+ * A connection is created each time an application opens a file descriptor on
+ * the driver
+ */
+struct SCXLNX_CONNECTION {
+ /*
+ * Identifies the connection in the list of the connections attached to
+ * the same device.
+ */
+ struct list_head list;
+
+ /*
+ * State of the connection.
+ */
+ enum SCXLNX_CONN_STATE nState;
+
+ /*
+ * A pointer to the corresponding device structure
+ */
+ struct SCXLNX_DEVICE *pDevice;
+
+ /*
+ * A spinlock protecting access to nState
+ */
+ spinlock_t stateLock;
+
+ /*
+ * Counts the number of operations currently pending on the connection.
+ * (for debug only)
+ */
+ atomic_t nPendingOpCounter;
+
+ /*
+ * A handle for the device context
+ */
+ u32 hDeviceContext;
+
+ /*
+ * Lists the used shared memory descriptors
+ */
+ struct list_head sUsedSharedMemoryList;
+
+ /*
+ * Lists the free shared memory descriptors
+ */
+ struct list_head sFreeSharedMemoryList;
+
+ /*
+ * A mutex protecting access to this structure
+ */
+ struct mutex sharedMemoriesMutex;
+
+ /*
+ * Counts the number of shared memories registered.
+ */
+ atomic_t nShmemAllocated;
+
+ /*
+ * Page used to retrieve the memory properties when registering
+ * shared memory through REGISTER_SHARED_MEMORY messages
+ */
+ struct vm_area_struct **ppVmas;
+
+ /*
+ * coarse page table allocation context
+ */
+ struct SCXLNX_COARSE_PAGE_TABLE_ALLOCATION_CONTEXT sAllocationContext;
+
+#ifdef CONFIG_TF_MSHIELD
+ /* Lists all the Cryptoki Update Shortcuts */
+ struct list_head ShortcutList;
+
+ /* Lock to protect concurrent accesses to ShortcutList */
+ spinlock_t shortcutListCriticalSectionLock;
+#endif
+};
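+
+/*
+ * State-check sketch (hypothetical helper, kept compiled out): the
+ * connection state is sampled under stateLock so that a concurrent
+ * DESTROY_DEVICE_CONTEXT cannot race with a message about to be sent.
+ */
+#if 0
+static bool example_connection_is_valid(struct SCXLNX_CONNECTION *pConn)
+{
+ bool bValid;
+
+ spin_lock(&pConn->stateLock);
+ bValid = (pConn->nState == SCXLNX_CONN_STATE_VALID_DEVICE_CONTEXT);
+ spin_unlock(&pConn->stateLock);
+
+ return bValid;
+}
+#endif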
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ * The nOperationID field of a message points to this structure.
+ * It is used to identify the thread that triggered the message transmission.
+ * Whoever reads an answer can wake up that thread using the completion event.
+ */
+struct SCXLNX_ANSWER_STRUCT {
+ bool bAnswerCopied;
+ union SCX_ANSWER_MESSAGE *pAnswer;
+};
+
+/*----------------------------------------------------------------------------*/
+
+/**
+ * The ASCII-C string representation of the base name of the devices managed by
+ * this driver.
+ */
+#define SCXLNX_DEVICE_BASE_NAME "tf_driver"
+
+
+/**
+ * The major and minor numbers of the registered character device driver.
+ * Only 1 instance of the driver is supported.
+ */
+#define SCXLNX_DEVICE_MINOR_NUMBER (0)
+
+struct SCXLNX_DEVICE *SCXLNXGetDevice(void);
+
+#define CLEAN_CACHE_CFG_MASK (~0xC) /* 1111 0011 */
+
+/*----------------------------------------------------------------------------*/
+/*
+ * Kernel Differences
+ */
+
+#ifdef CONFIG_ANDROID
+#define GROUP_INFO get_current_groups()
+#else
+#define GROUP_INFO (current->group_info)
+#endif
+
+#endif /* !defined(__SCXLNX_DEFS_H__) */
diff --git a/security/smc/omap4/scxlnx_device.c b/security/smc/omap4/scxlnx_device.c
new file mode 100644
index 0000000..cd9d56b
--- /dev/null
+++ b/security/smc/omap4/scxlnx_device.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2006-2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <asm/atomic.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/page-flags.h>
+#include <linux/pm.h>
+#include <linux/sysdev.h>
+#include <linux/vmalloc.h>
+#include <linux/signal.h>
+#ifdef CONFIG_ANDROID
+#include <linux/device.h>
+#endif
+
+#include "scx_protocol.h"
+#include "scxlnx_defs.h"
+#include "scxlnx_util.h"
+#ifdef CONFIG_TF_MSHIELD
+#include <plat/cpu.h>
+#include "scx_public_crypto.h"
+#endif
+
+/* The single device supported by this driver */
+static struct SCXLNX_DEVICE g_SCXLNXDevice = {0, };
+
+/*----------------------------------------------------------------------------
+ * Implementations
+ *----------------------------------------------------------------------------*/
+
+struct SCXLNX_DEVICE *SCXLNXGetDevice(void)
+{
+ return &g_SCXLNXDevice;
+}
+
+/*----------------------------------------------------------------------------*/
+
+static int __init register_dmcrypt_engines(void)
+{
+ int ret;
+
+ printk(KERN_INFO "Entered register_dmcrypt_engines\n");
+
+ ret = SCXPublicCryptoInit();
+ if (ret) {
+ printk(KERN_ERR "register_dmcrypt_engines():"
+ " SCXPublicCryptoInit failed, (error %d)!\n", ret);
+ goto out;
+ }
+
+ ret = register_smc_public_crypto_aes();
+ if (ret) {
+ printk(KERN_ERR "register_dmcrypt_engines():"
+ " regiser_smc_public_crypto_aes failed, (error %d)!\n", ret);
+ goto out;
+ }
+
+ ret = register_smc_public_crypto_digest();
+ if (ret) {
+ printk(KERN_ERR "register_dmcrypt_engines():"
+ " regiser_smc_public_crypto_digest failed, (error %d)!\n", ret);
+ goto out;
+ }
+
+out:
+ return ret;
+}
+module_init(register_dmcrypt_engines);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Trusted Logic S.A.");
diff --git a/security/smc/omap4/scxlnx_mshield.h b/security/smc/omap4/scxlnx_mshield.h
new file mode 100644
index 0000000..9457ca9
--- /dev/null
+++ b/security/smc/omap4/scxlnx_mshield.h
@@ -0,0 +1,44 @@
+/**
+ * Copyright (c) 2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#ifndef __SCXLNX_MSHIELD_H__
+#define __SCXLNX_MSHIELD_H__
+
+#include "scxlnx_defs.h"
+
+int SCXLNXCtrlDeviceRegister(void);
+
+int SCXLNXCommStart(struct SCXLNX_COMM *pComm,
+ u32 nWorkspaceAddr, u32 nWorkspaceSize,
+ u8 *pPABufferVAddr, u32 nPABufferSize,
+ u8 *pPropertiesBuffer, u32 nPropertiesBufferLength);
+
+/* Assembler entry points to/from secure */
+u32 schedule_secure_world(u32 app_id, u32 proc_id, u32 flags, u32 args);
+u32 rpc_handler(u32 p1, u32 p2, u32 p3, u32 p4);
+u32 read_mpidr(void);
+
+/* L4 SEC clockdomain enabling/disabling */
+void tf_l4sec_clkdm_wakeup(bool use_spin_lock, bool wakelock);
+void tf_l4sec_clkdm_allow_idle(bool use_spin_lock, bool wakeunlock);
+
+/* Delayed secure resume */
+int tf_delayed_secure_resume(void);
+
+#endif
diff --git a/security/smc/omap4/scxlnx_util.c b/security/smc/omap4/scxlnx_util.c
new file mode 100644
index 0000000..90cd831
--- /dev/null
+++ b/security/smc/omap4/scxlnx_util.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2006-2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+#include <linux/mman.h>
+#include "scxlnx_util.h"
+
+void *internal_kmalloc(size_t nSize, int nPriority)
+{
+ void *pResult;
+ struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
+
+ pResult = kmalloc(nSize, nPriority);
+
+ if (pResult != NULL)
+ atomic_inc(
+ &pDevice->sDeviceStats.stat_memories_allocated);
+
+ return pResult;
+}
+
+void internal_kfree(void *pMemory)
+{
+ struct SCXLNX_DEVICE *pDevice = SCXLNXGetDevice();
+
+ if (pMemory != NULL)
+ atomic_dec(
+ &pDevice->sDeviceStats.stat_memories_allocated);
+ kfree(pMemory);
+}
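+
+/*
+ * Usage sketch (illustrative only): pairing the two helpers above keeps
+ * stat_memories_allocated balanced in the device statistics, e.g.
+ *
+ *   u8 *pBuf = internal_kmalloc(SIZE_1KB, GFP_KERNEL);
+ *   if (pBuf != NULL) {
+ *           ...
+ *           internal_kfree(pBuf);
+ *   }
+ */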
+
diff --git a/security/smc/omap4/scxlnx_util.h b/security/smc/omap4/scxlnx_util.h
new file mode 100644
index 0000000..4569ec2
--- /dev/null
+++ b/security/smc/omap4/scxlnx_util.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2006-2010 Trusted Logic S.A.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+#ifndef __SCXLNX_UTIL_H__
+#define __SCXLNX_UTIL_H__
+
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/crypto.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+#include <asm/byteorder.h>
+
+#include "scx_protocol.h"
+#include "scxlnx_defs.h"
+
+/*----------------------------------------------------------------------------
+ * Debug printing routines
+ *----------------------------------------------------------------------------*/
+
+#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
+
+void addressCacheProperty(unsigned long va);
+
+#define dprintk printk
+
+void SCXLNXDumpL1SharedBuffer(struct SCHANNEL_C1S_BUFFER *pBuf);
+
+void SCXLNXDumpMessage(union SCX_COMMAND_MESSAGE *pMessage);
+
+void SCXLNXDumpAnswer(union SCX_ANSWER_MESSAGE *pAnswer);
+
+#ifdef CONFIG_SMC_BENCH_SECURE_CYCLE
+void setupCounters(void);
+void runBogoMIPS(void);
+int runCodeSpeed(unsigned int nLoop);
+int runDataSpeed(unsigned int nLoop, unsigned long nVA);
+#endif /* CONFIG_SMC_BENCH_SECURE_CYCLE */
+
+#else /* defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */
+
+#define dprintk(args...) do { ; } while (0)
+#define SCXLNXDumpL1SharedBuffer(pBuf) ((void) 0)
+#define SCXLNXDumpMessage(pMessage) ((void) 0)
+#define SCXLNXDumpAnswer(pAnswer) ((void) 0)
+
+#endif /* defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */
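+
+/*
+ * Example (the message below is arbitrary): without
+ * CONFIG_TF_DRIVER_DEBUG_SUPPORT the call expands to a no-op, so such
+ * traces can be left in place:
+ *
+ *   dprintk(KERN_DEBUG "SCXLNXConnGetCurrentProcessHash: hash computed\n");
+ */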
+
+#define SHA1_DIGEST_SIZE 20
+
+/*----------------------------------------------------------------------------
+ * Process identification
+ *----------------------------------------------------------------------------*/
+
+int SCXLNXConnGetCurrentProcessHash(void *pHash);
+
+int SCXLNXConnHashApplicationPathAndData(char *pBuffer, void *pData,
+ u32 nDataLen);
+
+/*----------------------------------------------------------------------------
+ * Statistic computation
+ *----------------------------------------------------------------------------*/
+
+void *internal_kmalloc(size_t nSize, int nPriority);
+void internal_kfree(void *pMemory);
+void internal_vunmap(void *pMemory);
+void *internal_vmalloc(size_t nSize);
+void internal_vfree(void *pMemory);
+unsigned long internal_get_zeroed_page(int nPriority);
+void internal_free_page(unsigned long pPage);
+int internal_get_user_pages(
+ struct task_struct *tsk,
+ struct mm_struct *mm,
+ unsigned long start,
+ int len,
+ int write,
+ int force,
+ struct page **pages,
+ struct vm_area_struct **vmas);
+void internal_get_page(struct page *page);
+void internal_page_cache_release(struct page *page);
+#endif /* __SCXLNX_UTIL_H__ */
+