author    Kenny Root <kroot@google.com>    2015-09-25 00:26:37 +0000
committer Kenny Root <kroot@google.com>    2015-09-25 00:26:37 +0000
commit    a04d78d392463df4e69a64360c952ffa5abd22f7 (patch)
tree      dc62c249d595198e0d99e43890019d21e901fbec /src/crypto/sha
parent    1e4884f615b20946411a74e41eb9c6aa65e2d5f3 (diff)
Revert "external/boringssl: sync with upstream."
This reverts commit 1e4884f615b20946411a74e41eb9c6aa65e2d5f3; the synced changes break some x86 builds.

Change-Id: I4d4310663ce52bc0a130e6b9dbc22b868ff4fb25
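A revert commit of this shape is normally produced with stock git rather than by hand-editing files. A minimal sketch, assuming a checkout of this repository with the offending sync commit present (the hash below is taken from the parent field above, and src/crypto/sha is this page's path filter):

    # create a new commit that undoes the upstream sync
    git revert 1e4884f615b20946411a74e41eb9c6aa65e2d5f3

    # sanity-check: this subtree should now match its pre-sync state
    git diff 1e4884f615b20946411a74e41eb9c6aa65e2d5f3^ HEAD -- src/crypto/sha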
Diffstat (limited to 'src/crypto/sha')
-rw-r--r--  src/crypto/sha/CMakeLists.txt           | 2 +-
-rw-r--r--  src/crypto/sha/asm/sha1-586.pl          | 4 ++--
-rw-r--r--  src/crypto/sha/asm/sha1-armv4-large.pl  | 2 +-
-rw-r--r--  src/crypto/sha/asm/sha1-armv8.pl        | 2 +-
-rw-r--r--  src/crypto/sha/asm/sha256-586.pl        | 2 +-
-rw-r--r--  src/crypto/sha/asm/sha256-armv4.pl      | 2 +-
-rw-r--r--  src/crypto/sha/asm/sha512-586.pl        | 2 +-
-rw-r--r--  src/crypto/sha/asm/sha512-armv4.pl      | 2 +-
-rw-r--r--  src/crypto/sha/asm/sha512-armv8.pl      | 2 +-

9 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/src/crypto/sha/CMakeLists.txt b/src/crypto/sha/CMakeLists.txt
index ecff09b..5a10c85 100644
--- a/src/crypto/sha/CMakeLists.txt
+++ b/src/crypto/sha/CMakeLists.txt
@@ -1,4 +1,4 @@
-include_directories(../../include)
+include_directories(. .. ../../include)
if (${ARCH} STREQUAL "x86_64")
set(
diff --git a/src/crypto/sha/asm/sha1-586.pl b/src/crypto/sha/asm/sha1-586.pl
index e0b5d83..4895eb3 100644
--- a/src/crypto/sha/asm/sha1-586.pl
+++ b/src/crypto/sha/asm/sha1-586.pl
@@ -66,9 +66,9 @@
# switch to AVX alone improves performance by as little as 4% in
# comparison to SSSE3 code path. But below result doesn't look like
# 4% improvement... Trouble is that Sandy Bridge decodes 'ro[rl]' as
-# pair of µ-ops, and it's the additional µ-ops, two per round, that
+# pair of µ-ops, and it's the additional µ-ops, two per round, that
# make it run slower than Core2 and Westmere. But 'sh[rl]d' is decoded
-# as single µ-op by Sandy Bridge and it's replacing 'ro[rl]' with
+# as single µ-op by Sandy Bridge and it's replacing 'ro[rl]' with
# equivalent 'sh[rl]d' that is responsible for the impressive 5.1
# cycles per processed byte. But 'sh[rl]d' is not something that used
# to be fast, nor does it appear to be fast in upcoming Bulldozer
diff --git a/src/crypto/sha/asm/sha1-armv4-large.pl b/src/crypto/sha/asm/sha1-armv4-large.pl
index 64e2ed6..a20d336 100644
--- a/src/crypto/sha/asm/sha1-armv4-large.pl
+++ b/src/crypto/sha/asm/sha1-armv4-large.pl
@@ -178,7 +178,7 @@ ___
}
$code=<<___;
-#include <openssl/arm_arch.h>
+#include "arm_arch.h"
.text
.code 32
diff --git a/src/crypto/sha/asm/sha1-armv8.pl b/src/crypto/sha/asm/sha1-armv8.pl
index 1c4fe4a..a8c08c2 100644
--- a/src/crypto/sha/asm/sha1-armv8.pl
+++ b/src/crypto/sha/asm/sha1-armv8.pl
@@ -162,7 +162,7 @@ ___
}
$code.=<<___;
-#include <openssl/arm_arch.h>
+#include "arm_arch.h"
.text
diff --git a/src/crypto/sha/asm/sha256-586.pl b/src/crypto/sha/asm/sha256-586.pl
index e907714..6462e45 100644
--- a/src/crypto/sha/asm/sha256-586.pl
+++ b/src/crypto/sha/asm/sha256-586.pl
@@ -10,7 +10,7 @@
# SHA256 block transform for x86. September 2007.
#
# Performance improvement over compiler generated code varies from
-# 10% to 40% [see below]. Not very impressive on some µ-archs, but
+# 10% to 40% [see below]. Not very impressive on some µ-archs, but
# it's 5 times smaller and optimizies amount of writes.
#
# May 2012.
diff --git a/src/crypto/sha/asm/sha256-armv4.pl b/src/crypto/sha/asm/sha256-armv4.pl
index 7e07147..df71676 100644
--- a/src/crypto/sha/asm/sha256-armv4.pl
+++ b/src/crypto/sha/asm/sha256-armv4.pl
@@ -168,7 +168,7 @@ ___
$code=<<___;
#ifndef __KERNEL__
-# include <openssl/arm_arch.h>
+# include "arm_arch.h"
#else
# define __ARM_ARCH__ __LINUX_ARM_ARCH__
# define __ARM_MAX_ARCH__ 7
diff --git a/src/crypto/sha/asm/sha512-586.pl b/src/crypto/sha/asm/sha512-586.pl
index 2f6a202..e96ec00 100644
--- a/src/crypto/sha/asm/sha512-586.pl
+++ b/src/crypto/sha/asm/sha512-586.pl
@@ -37,7 +37,7 @@
#
# IALU code-path is optimized for elder Pentiums. On vanilla Pentium
# performance improvement over compiler generated code reaches ~60%,
-# while on PIII - ~35%. On newer µ-archs improvement varies from 15%
+# while on PIII - ~35%. On newer µ-archs improvement varies from 15%
# to 50%, but it's less important as they are expected to execute SSE2
# code-path, which is commonly ~2-3x faster [than compiler generated
# code]. SSE2 code-path is as fast as original sha512-sse2.pl, even
diff --git a/src/crypto/sha/asm/sha512-armv4.pl b/src/crypto/sha/asm/sha512-armv4.pl
index cd3662a..2964a39 100644
--- a/src/crypto/sha/asm/sha512-armv4.pl
+++ b/src/crypto/sha/asm/sha512-armv4.pl
@@ -191,7 +191,7 @@ ___
}
$code=<<___;
#ifndef __KERNEL__
-# include <openssl/arm_arch.h>
+# include "arm_arch.h"
# define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
# define VFP_ABI_POP vldmia sp!,{d8-d15}
#else
diff --git a/src/crypto/sha/asm/sha512-armv8.pl b/src/crypto/sha/asm/sha512-armv8.pl
index 40eb17a..43e7293 100644
--- a/src/crypto/sha/asm/sha512-armv8.pl
+++ b/src/crypto/sha/asm/sha512-armv8.pl
@@ -164,7 +164,7 @@ ___
}
$code.=<<___;
-#include <openssl/arm_arch.h>
+#include "arm_arch.h"
.text