about summary refs log tree commit diff stats
path: root/security/smc/tf_crypto_digest.c
diff options
context:
space:
mode:
Diffstat (limited to 'security/smc/tf_crypto_digest.c')
-rw-r--r--  security/smc/tf_crypto_digest.c  135
1 file changed, 80 insertions, 55 deletions
diff --git a/security/smc/tf_crypto_digest.c b/security/smc/tf_crypto_digest.c
index d69a97f..8023839 100644
--- a/security/smc/tf_crypto_digest.c
+++ b/security/smc/tf_crypto_digest.c
@@ -151,12 +151,25 @@ static const u8 sha256OverEmptyString[] = {
static void tf_digest_hw_perform_64b(u32 *data,
u32 algo, u32 bytes_processed);
static bool tf_digest_hw_perform_dma(u8 *data, u32 nDataLength,
- u32 algo, u32 bytes_processed);
+ u32 algo, u32 bytes_processed,
+ unsigned int buffer_origin);
static bool tf_digest_update_dma(
struct tf_crypto_sha_operation_state *sha_state,
- u8 *data, u32 data_length);
+ u8 *data, u32 data_length, unsigned int buffer_origin);
+/*------------------------------------------------------------------------- */
+
+static unsigned long tf_cpy_from(
+void *to, const void *from, unsigned long n, unsigned int buffer_origin)
+{
+ if (buffer_origin == TF_BUFFER_KERNEL) {
+ memcpy(to, from, n);
+ return 0;
+ } else {
+ return copy_from_user(to, from, n);
+ }
+}
/*-------------------------------------------------------------------------
*Save HWA registers into the specified operation state structure
@@ -164,8 +177,7 @@ static bool tf_digest_update_dma(
static void tf_digest_save_registers(
struct tf_crypto_sha_operation_state *sha_state)
{
- dprintk(KERN_INFO "tf_digest_save_registers: State=%p\n",
- sha_state);
+ dpr_info("%s: State=%p\n", __func__, sha_state);
sha_state->SHA_DIGEST_A = INREG32(&sha1_md5_reg->IDIGEST_A);
sha_state->SHA_DIGEST_B = INREG32(&sha1_md5_reg->IDIGEST_B);
@@ -183,8 +195,7 @@ static void tf_digest_save_registers(
static void tf_digest_restore_registers(
struct tf_crypto_sha_operation_state *sha_state)
{
- dprintk(KERN_INFO "tf_digest_restore_registers: State=%p\n",
- sha_state);
+ dpr_info("%s: State=%p\n", __func__, sha_state);
if (sha_state->bytes_processed != 0) {
/*
@@ -219,7 +230,7 @@ void tf_digest_exit(void)
}
bool tf_digest_update(struct tf_crypto_sha_operation_state *sha_state,
- u8 *data, u32 data_length)
+ u8 *data, u32 data_length, unsigned int buffer_origin)
{
u32 dma_use = PUBLIC_CRYPTO_DMA_USE_NONE;
@@ -229,15 +240,13 @@ bool tf_digest_update(struct tf_crypto_sha_operation_state *sha_state,
if (data_length >= DMA_TRIGGER_IRQ_DIGEST)
dma_use = PUBLIC_CRYPTO_DMA_USE_IRQ;
- dprintk(KERN_INFO "tf_digest_update : "\
- "Data=0x%08x/%u, Chunk=%u, Processed=%u, dma_use=0x%08x\n",
- (u32)data, (u32)data_length,
+ dpr_info("%s: Data=0x%08x/%u, Chunk=%u, Processed=%u, dma_use=0x%08x\n",
+ __func__, (u32)data, (u32)data_length,
sha_state->chunk_length, sha_state->bytes_processed,
dma_use);
if (data_length == 0) {
- dprintk(KERN_INFO "tf_digest_update: "\
- "Nothing to process\n");
+ dpr_info("%s: Nothing to process\n", __func__);
return true;
}
@@ -249,7 +258,8 @@ bool tf_digest_update(struct tf_crypto_sha_operation_state *sha_state,
tf_digest_restore_registers(sha_state);
/*perform the updates with DMA */
- if (!tf_digest_update_dma(sha_state, data, data_length))
+ if (!tf_digest_update_dma(
+ sha_state, data, data_length, buffer_origin))
return false;
/* Save the accelerator registers into the operation state */
@@ -274,10 +284,11 @@ bool tf_digest_update(struct tf_crypto_sha_operation_state *sha_state,
/*So we fill the chunk buffer with the new data to
*complete to 64B */
- if (copy_from_user(
+ if (tf_cpy_from(
sha_state->chunk_buffer+sha_state->chunk_length,
data,
- vLengthToComplete))
+ vLengthToComplete,
+ buffer_origin))
return false;
if (sha_state->chunk_length + data_length ==
@@ -285,9 +296,8 @@ bool tf_digest_update(struct tf_crypto_sha_operation_state *sha_state,
/*We'll keep some data for the final */
sha_state->chunk_length =
HASH_BLOCK_BYTES_LENGTH;
- dprintk(KERN_INFO "tf_digest_update: "\
- "Done: Chunk=%u; Processed=%u\n",
- sha_state->chunk_length,
+ dpr_info("%s: Done: Chunk=%u; Processed=%u\n",
+ __func__, sha_state->chunk_length,
sha_state->bytes_processed);
return true;
}
@@ -332,10 +342,11 @@ bool tf_digest_update(struct tf_crypto_sha_operation_state *sha_state,
*/
/*We copy the data to process to an aligned
*buffer */
- if (copy_from_user(
+ if (tf_cpy_from(
pTempAlignedBuffer,
data,
- HASH_BLOCK_BYTES_LENGTH))
+ HASH_BLOCK_BYTES_LENGTH,
+ buffer_origin))
return false;
/*Then we send this buffer to the hash
@@ -368,18 +379,18 @@ bool tf_digest_update(struct tf_crypto_sha_operation_state *sha_state,
/*So we fill the chunk buffer with the new data to
*complete to 64B */
- if (copy_from_user(
+ if (tf_cpy_from(
sha_state->chunk_buffer+sha_state->chunk_length,
data,
- data_length))
+ data_length,
+ buffer_origin))
return false;
sha_state->chunk_length += data_length;
}
}
- dprintk(KERN_INFO "tf_digest_update: Done: "\
- "Chunk=%u; Processed=%u\n",
- sha_state->chunk_length, sha_state->bytes_processed);
+ dpr_info("%s: Done: Chunk=%u; Processed=%u\n",
+ __func__, sha_state->chunk_length, sha_state->bytes_processed);
return true;
}
@@ -443,7 +454,8 @@ static void tf_digest_hw_perform_64b(u32 *data,
/*------------------------------------------------------------------------- */
static bool tf_digest_hw_perform_dma(u8 *data, u32 nDataLength,
- u32 algo, u32 bytes_processed)
+ u32 algo, u32 bytes_processed,
+ unsigned int buffer_origin)
{
/*
*Note: The DMA only sees physical addresses !
@@ -455,9 +467,9 @@ static bool tf_digest_hw_perform_dma(u8 *data, u32 nDataLength,
u32 algo_constant;
struct tf_device *dev = tf_get_device();
- dprintk(KERN_INFO
- "tf_digest_hw_perform_dma: Buffer=0x%08x/%u\n",
- (u32)data, (u32)nDataLength);
+ dpr_info(
+ "%s: Buffer=0x%08x/%u\n",
+ __func__, (u32)data, (u32)nDataLength);
/*lock the DMA */
mutex_lock(&dev->sm.dma_mutex);
@@ -486,7 +498,8 @@ static bool tf_digest_hw_perform_dma(u8 *data, u32 nDataLength,
* buffer which has correct properties from efficient DMA
* transfers.
*/
- if (copy_from_user(dev->dma_buffer, data, length_loop)) {
+ if (tf_cpy_from(
+ dev->dma_buffer, data, length_loop, buffer_origin)) {
omap_free_dma(dma_ch0);
mutex_unlock(&dev->sm.dma_mutex);
return false;
@@ -568,9 +581,9 @@ static bool tf_digest_hw_perform_dma(u8 *data, u32 nDataLength,
*/
static bool tf_digest_update_dma(
struct tf_crypto_sha_operation_state *sha_state,
- u8 *data, u32 data_length)
+ u8 *data, u32 data_length, unsigned int buffer_origin)
{
- dprintk(KERN_INFO "tf_digest_update_dma\n");
+ dpr_info("%s\n", __func__);
if (sha_state->chunk_length != 0) {
@@ -581,9 +594,9 @@ static bool tf_digest_update_dma(
chunk_length + data_length <= HASH_BLOCK_BYTES_LENGTH) {
/*So we fill the chunk buffer with the new data */
- if (copy_from_user(sha_state->chunk_buffer +
+ if (tf_cpy_from(sha_state->chunk_buffer +
sha_state->chunk_length, data,
- data_length))
+ data_length, buffer_origin))
return false;
sha_state->chunk_length += data_length;
@@ -597,9 +610,9 @@ static bool tf_digest_update_dma(
if (vLengthToComplete != 0) {
/*So we fill the chunk buffer with the new data to
*complete to 64B */
- if (copy_from_user(sha_state->chunk_buffer +
+ if (tf_cpy_from(sha_state->chunk_buffer +
sha_state->chunk_length, data,
- vLengthToComplete))
+ vLengthToComplete, buffer_origin))
return false;
}
@@ -631,7 +644,8 @@ static bool tf_digest_update_dma(
}
if (!tf_digest_hw_perform_dma(data, vDmaProcessize,
- sha_state->CTRL, sha_state->bytes_processed))
+ sha_state->CTRL, sha_state->bytes_processed,
+ buffer_origin))
return false;
sha_state->bytes_processed =
@@ -646,7 +660,8 @@ static bool tf_digest_update_dma(
return false;
/*We now fill the chunk buffer with the remaining data */
- if (copy_from_user(sha_state->chunk_buffer, data, data_length))
+ if (tf_cpy_from(
+ sha_state->chunk_buffer, data, data_length, buffer_origin))
return false;
sha_state->chunk_length = data_length;
@@ -693,6 +708,24 @@ static int static_Hash_HwReadDigest(u32 algo, u8 *out)
out[idx++] = (u8) ((tmp >> 24) & 0xff);
}
+#ifdef CONFIG_TF_DRIVER_FAULT_INJECTION
+#define FAULTY(mask, ctrl_algo, alg_name) \
+ (((mask) & TF_CRYPTO_ALG_##alg_name) && \
+ (ctrl_algo) == DIGEST_CTRL_ALGO_##alg_name)
+ if (FAULTY(tf_fault_injection_mask, algo, MD5) ||
+ FAULTY(tf_fault_injection_mask, algo, SHA1) ||
+ FAULTY(tf_fault_injection_mask, algo, SHA224) ||
+ FAULTY(tf_fault_injection_mask, algo, SHA256)) {
+ pr_notice("TF: injecting fault in digest!\n");
+ out[0] = 0xff;
+ out[1] ^= 0xff;
+ } else {
+ dpr_info("%s: no fault "
+ "(mask=0x%x algo=%u)\n",
+ __func__, tf_fault_injection_mask, algo);
+ }
+#undef FAULTY
+#endif /* CONFIG_TF_DRIVER_FAULT_INJECTION */
return 0;
}
@@ -773,24 +806,21 @@ static int tf_digest_final(struct tf_crypto_sha_operation_state *state,
* Digest HWA registration into kernel crypto framework
*/
+static DEFINE_SPINLOCK(digest_lock);
+
static int digest_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct tf_crypto_sha_operation_state *state = shash_desc_ctx(desc);
- /* Make sure SHA/MD5 HWA is accessible */
- tf_delayed_secure_resume();
-
- tf_crypto_lock_hwa(PUBLIC_CRYPTO_HWA_SHA, LOCK_HWA);
-
tf_crypto_enable_clock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG);
- tf_digest_update(state, (u8 *) data, len);
+ spin_lock(&digest_lock);
+ tf_digest_update(state, (u8 *) data, len, TF_BUFFER_KERNEL);
+ spin_unlock(&digest_lock);
tf_crypto_disable_clock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG);
- tf_crypto_lock_hwa(PUBLIC_CRYPTO_HWA_SHA, UNLOCK_HWA);
-
return 0;
}
@@ -799,19 +829,14 @@ static int digest_final(struct shash_desc *desc, u8 *out)
int ret;
struct tf_crypto_sha_operation_state *state = shash_desc_ctx(desc);
- /* Make sure SHA/MD5 HWA is accessible */
- tf_delayed_secure_resume();
-
- tf_crypto_lock_hwa(PUBLIC_CRYPTO_HWA_SHA, LOCK_HWA);
-
tf_crypto_enable_clock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG);
+ spin_lock(&digest_lock);
ret = tf_digest_final(state, out);
+ spin_unlock(&digest_lock);
tf_crypto_disable_clock(PUBLIC_CRYPTO_SHA2MD5_CLOCK_REG);
- tf_crypto_lock_hwa(PUBLIC_CRYPTO_HWA_SHA, UNLOCK_HWA);
-
return ret;
}
@@ -951,7 +976,7 @@ int register_smc_public_crypto_digest(void)
{
int ret;
- dprintk(KERN_INFO "SMC: Registering digest algorithms\n");
+ dpr_info("SMC: Registering digest algorithms\n");
ret = crypto_register_shash(&smc_md5_alg);
if (ret)
@@ -982,7 +1007,7 @@ sha1_err:
void unregister_smc_public_crypto_digest(void)
{
- dprintk(KERN_INFO "SMC: Unregistering digest algorithms\n");
+ dpr_info("SMC: Unregistering digest algorithms\n");
crypto_unregister_shash(&smc_md5_alg);
crypto_unregister_shash(&smc_sha1_alg);