author     Kenny Root <kroot@google.com>                                 2015-09-25 00:44:37 +0000
committer  Android Git Automerger <android-git-automerger@android.com>  2015-09-25 00:44:37 +0000
commit  e246de8f184e644debf965ecdca552f006b56881 (patch)
tree    dc62c249d595198e0d99e43890019d21e901fbec /linux-arm/crypto
parent  c737bc23bc868fff21e5c1b95940813f709ea550 (diff)
parent  00bc53f6f4436972b7a8dcf2c1e5fd0ad7515872 (diff)
am 00bc53f6: am a04d78d3: Revert "external/boringssl: sync with upstream."
* commit '00bc53f6f4436972b7a8dcf2c1e5fd0ad7515872':
  Revert "external/boringssl: sync with upstream."
Diffstat (limited to 'linux-arm/crypto')
-rw-r--r--  linux-arm/crypto/aes/aes-armv4.S                                                         |  4
-rw-r--r--  linux-arm/crypto/aes/aesv8-armx.S (renamed from linux-arm/crypto/aes/aesv8-armx32.S)     |  4
-rw-r--r--  linux-arm/crypto/aes/bsaes-armv7.S                                                       |  4
-rw-r--r--  linux-arm/crypto/bn/armv4-mont.S                                                         |  4
-rw-r--r--  linux-arm/crypto/modes/ghash-armv4.S                                                     |  4
-rw-r--r--  linux-arm/crypto/modes/ghashv8-armx.S (renamed from linux-arm/crypto/modes/ghashv8-armx32.S) | 26
-rw-r--r--  linux-arm/crypto/sha/sha1-armv4-large.S                                                  |  4
-rw-r--r--  linux-arm/crypto/sha/sha256-armv4.S                                                      |  4
-rw-r--r--  linux-arm/crypto/sha/sha512-armv4.S                                                      |  4
9 files changed, 20 insertions, 38 deletions
diff --git a/linux-arm/crypto/aes/aes-armv4.S b/linux-arm/crypto/aes/aes-armv4.S
index c4d7065..1135020 100644
--- a/linux-arm/crypto/aes/aes-armv4.S
+++ b/linux-arm/crypto/aes/aes-armv4.S
@@ -1,4 +1,3 @@
-#if defined(__arm__)
@ ====================================================================
@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@@ -34,7 +33,7 @@
#if defined(__arm__)
#ifndef __KERNEL__
-# include <openssl/arm_arch.h>
+# include "arm_arch.h"
#else
# define __ARM_ARCH__ __LINUX_ARM_ARCH__
#endif
@@ -1197,4 +1196,3 @@ _armv4_AES_decrypt:
.align 2
#endif
-#endif
\ No newline at end of file
diff --git a/linux-arm/crypto/aes/aesv8-armx32.S b/linux-arm/crypto/aes/aesv8-armx.S
index 6012b0c..006300c 100644
--- a/linux-arm/crypto/aes/aesv8-armx32.S
+++ b/linux-arm/crypto/aes/aesv8-armx.S
@@ -1,5 +1,4 @@
-#if defined(__arm__)
-#include <openssl/arm_arch.h>
+#include "arm_arch.h"
#if __ARM_MAX_ARCH__>=7
.text
@@ -753,4 +752,3 @@ aes_v8_ctr32_encrypt_blocks:
ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,pc}
.size aes_v8_ctr32_encrypt_blocks,.-aes_v8_ctr32_encrypt_blocks
#endif
-#endif
\ No newline at end of file
diff --git a/linux-arm/crypto/aes/bsaes-armv7.S b/linux-arm/crypto/aes/bsaes-armv7.S
index 85262d5..204ee3e 100644
--- a/linux-arm/crypto/aes/bsaes-armv7.S
+++ b/linux-arm/crypto/aes/bsaes-armv7.S
@@ -1,4 +1,3 @@
-#if defined(__arm__)
@ ====================================================================
@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@@ -49,7 +48,7 @@
#if defined(__arm__)
#ifndef __KERNEL__
-# include <openssl/arm_arch.h>
+# include "arm_arch.h"
# define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
# define VFP_ABI_POP vldmia sp!,{d8-d15}
@@ -2576,4 +2575,3 @@ bsaes_xts_decrypt:
.size bsaes_xts_decrypt,.-bsaes_xts_decrypt
#endif
#endif
-#endif
\ No newline at end of file
diff --git a/linux-arm/crypto/bn/armv4-mont.S b/linux-arm/crypto/bn/armv4-mont.S
index fc671e8..81dcbeb 100644
--- a/linux-arm/crypto/bn/armv4-mont.S
+++ b/linux-arm/crypto/bn/armv4-mont.S
@@ -1,5 +1,4 @@
-#if defined(__arm__)
-#include <openssl/arm_arch.h>
+#include "arm_arch.h"
.text
.code 32
@@ -586,4 +585,3 @@ bn_mul8x_mont_neon:
.comm OPENSSL_armcap_P,4,4
.hidden OPENSSL_armcap_P
#endif
-#endif
\ No newline at end of file
diff --git a/linux-arm/crypto/modes/ghash-armv4.S b/linux-arm/crypto/modes/ghash-armv4.S
index f868c2d..b6c7e9b 100644
--- a/linux-arm/crypto/modes/ghash-armv4.S
+++ b/linux-arm/crypto/modes/ghash-armv4.S
@@ -1,6 +1,5 @@
#if defined(__arm__)
-#if defined(__arm__)
-#include <openssl/arm_arch.h>
+#include "arm_arch.h"
.syntax unified
@@ -538,4 +537,3 @@ gcm_ghash_neon:
.align 2
#endif
-#endif
\ No newline at end of file
diff --git a/linux-arm/crypto/modes/ghashv8-armx32.S b/linux-arm/crypto/modes/ghashv8-armx.S
index 9a38ded..71913fb 100644
--- a/linux-arm/crypto/modes/ghashv8-armx32.S
+++ b/linux-arm/crypto/modes/ghashv8-armx.S
@@ -1,5 +1,4 @@
-#if defined(__arm__)
-#include <openssl/arm_arch.h>
+#include "arm_arch.h"
.text
.fpu neon
@@ -67,10 +66,10 @@ gcm_gmult_v8:
#endif
vext.8 q3,q9,q9,#8
-.byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
+.byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
veor q9,q9,q3 @ Karatsuba pre-processing
-.byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
-.byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
+.byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
+.byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
vext.8 q9,q0,q2,#8 @ Karatsuba post-processing
veor q10,q0,q2
@@ -135,7 +134,7 @@ gcm_ghash_v8:
#endif
vext.8 q7,q9,q9,#8
veor q3,q3,q0 @ I[i]^=Xi
-.byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1
+.byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1
veor q9,q9,q7 @ Karatsuba pre-processing
.byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7
b .Loop_mod2x_v8
@@ -144,14 +143,14 @@ gcm_ghash_v8:
.Loop_mod2x_v8:
vext.8 q10,q3,q3,#8
subs r3,r3,#32 @ is there more data?
-.byte 0x86,0x0e,0xac,0xf2 @ pmull q0,q14,q3 @ H^2.lo·Xi.lo
+.byte 0x86,0x0e,0xac,0xf2 @ pmull q0,q14,q3 @ H^2.lo·Xi.lo
movlo r12,#0 @ is it time to zero r12?
.byte 0xa2,0xae,0xaa,0xf2 @ pmull q5,q13,q9
veor q10,q10,q3 @ Karatsuba pre-processing
-.byte 0x87,0x4e,0xad,0xf2 @ pmull2 q2,q14,q3 @ H^2.hi·Xi.hi
+.byte 0x87,0x4e,0xad,0xf2 @ pmull2 q2,q14,q3 @ H^2.hi·Xi.hi
veor q0,q0,q4 @ accumulate
-.byte 0xa5,0x2e,0xab,0xf2 @ pmull2 q1,q13,q10 @ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
+.byte 0xa5,0x2e,0xab,0xf2 @ pmull2 q1,q13,q10 @ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
vld1.64 {q8},[r2],r12 @ load [rotated] I[i+2]
veor q2,q2,q6
@@ -176,7 +175,7 @@ gcm_ghash_v8:
vext.8 q7,q9,q9,#8
vext.8 q3,q8,q8,#8
veor q0,q1,q10
-.byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1
+.byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1
veor q3,q3,q2 @ accumulate q3 early
vext.8 q10,q0,q0,#8 @ 2nd phase of reduction
@@ -197,10 +196,10 @@ gcm_ghash_v8:
veor q3,q3,q0 @ inp^=Xi
veor q9,q8,q10 @ q9 is rotated inp^Xi
-.byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
+.byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
veor q9,q9,q3 @ Karatsuba pre-processing
-.byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
-.byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
+.byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
+.byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
vext.8 q9,q0,q2,#8 @ Karatsuba post-processing
veor q10,q0,q2
@@ -230,4 +229,3 @@ gcm_ghash_v8:
.byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align 2
.align 2
-#endif
\ No newline at end of file
diff --git a/linux-arm/crypto/sha/sha1-armv4-large.S b/linux-arm/crypto/sha/sha1-armv4-large.S
index 66d0ef3..52c99bf 100644
--- a/linux-arm/crypto/sha/sha1-armv4-large.S
+++ b/linux-arm/crypto/sha/sha1-armv4-large.S
@@ -1,5 +1,4 @@
-#if defined(__arm__)
-#include <openssl/arm_arch.h>
+#include "arm_arch.h"
.text
.code 32
@@ -1459,4 +1458,3 @@ sha1_block_data_order_armv8:
.comm OPENSSL_armcap_P,4,4
.hidden OPENSSL_armcap_P
#endif
-#endif
\ No newline at end of file
diff --git a/linux-arm/crypto/sha/sha256-armv4.S b/linux-arm/crypto/sha/sha256-armv4.S
index 9fc3e0b..114aa43 100644
--- a/linux-arm/crypto/sha/sha256-armv4.S
+++ b/linux-arm/crypto/sha/sha256-armv4.S
@@ -1,4 +1,3 @@
-#if defined(__arm__)
@ ====================================================================
@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@@ -38,7 +37,7 @@
@ Add ARMv8 code path performing at 2.0 cpb on Apple A7.
#ifndef __KERNEL__
-# include <openssl/arm_arch.h>
+# include "arm_arch.h"
#else
# define __ARM_ARCH__ __LINUX_ARM_ARCH__
# define __ARM_MAX_ARCH__ 7
@@ -2815,4 +2814,3 @@ sha256_block_data_order_armv8:
.comm OPENSSL_armcap_P,4,4
.hidden OPENSSL_armcap_P
#endif
-#endif
\ No newline at end of file
diff --git a/linux-arm/crypto/sha/sha512-armv4.S b/linux-arm/crypto/sha/sha512-armv4.S
index 834ede9..1a3d467 100644
--- a/linux-arm/crypto/sha/sha512-armv4.S
+++ b/linux-arm/crypto/sha/sha512-armv4.S
@@ -1,4 +1,3 @@
-#if defined(__arm__)
@ ====================================================================
@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@@ -47,7 +46,7 @@
@ was reflected in below two parameters as 0 and 4. Now caller is
@ expected to maintain native byte order for whole 64-bit values.
#ifndef __KERNEL__
-# include <openssl/arm_arch.h>
+# include "arm_arch.h"
# define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
# define VFP_ABI_POP vldmia sp!,{d8-d15}
#else
@@ -1866,4 +1865,3 @@ sha512_block_data_order_neon:
.comm OPENSSL_armcap_P,4,4
.hidden OPENSSL_armcap_P
#endif
-#endif
\ No newline at end of file