Diffstat (limited to 'src/crypto/sha')
 src/crypto/sha/asm/sha256-armv4.pl |  2 +-
 src/crypto/sha/sha1.c              |  6 +++---
 src/crypto/sha/sha256.c            | 12 ++++++++----
 src/crypto/sha/sha512.c            | 14 +++++++++-----
 4 files changed, 21 insertions(+), 13 deletions(-)
diff --git a/src/crypto/sha/asm/sha256-armv4.pl b/src/crypto/sha/asm/sha256-armv4.pl
index 778c3d9..df71676 100644
--- a/src/crypto/sha/asm/sha256-armv4.pl
+++ b/src/crypto/sha/asm/sha256-armv4.pl
@@ -479,7 +479,7 @@ sha256_block_data_order_neon:
stmdb sp!,{r4-r12,lr}
sub $H,sp,#16*4+16
- adr $Ktbl,K256
+ adrl $Ktbl,K256
bic $H,$H,#15 @ align for 128-bit stores
mov $t2,sp
mov sp,$H @ alloca
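This one-character change is the whole fix for the NEON path: "adr" is a single PC-relative add/subtract whose immediate can only reach a label within roughly +/-1KB, so it fails to assemble once the K256 table sits too far from this code. "adrl" is an assembler pseudo-instruction that expands to two such instructions, extending the reach to roughly +/-64KB. Approximately (my illustration, not assembler output from this file):

	adrl	$Ktbl,K256
	@ expands to two PC-relative arithmetic instructions, e.g.
	add	$Ktbl,pc,#imm_hi	@ or sub, depending on where K256 sits
	add	$Ktbl,$Ktbl,#imm_lo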
diff --git a/src/crypto/sha/sha1.c b/src/crypto/sha/sha1.c
index 60d09f6..c03e608 100644
--- a/src/crypto/sha/sha1.c
+++ b/src/crypto/sha/sha1.c
@@ -101,7 +101,7 @@ uint8_t *SHA1(const uint8_t *data, size_t len, uint8_t *out) {
#define HASH_CBLOCK 64
#define HASH_MAKE_STRING(c, s) \
do { \
- unsigned long ll; \
+ uint32_t ll; \
ll = (c)->h0; \
(void) HOST_l2c(ll, (s)); \
ll = (c)->h1; \
@@ -188,8 +188,8 @@ void sha1_block_data_order(SHA_CTX *c, const void *p, size_t num);
#if !defined(SHA1_ASM)
static void HASH_BLOCK_DATA_ORDER(SHA_CTX *c, const void *p, size_t num) {
const uint8_t *data = p;
- register unsigned MD32_REG_T A, B, C, D, E, T, l;
- unsigned MD32_REG_T XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7, XX8, XX9, XX10,
+ register uint32_t A, B, C, D, E, T, l;
+ uint32_t XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7, XX8, XX9, XX10,
XX11, XX12, XX13, XX14, XX15;
A = c->h0;
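The type changes above are the recurring theme of this diff: the SHA-1 state words h0..h4 are 32-bit quantities, but "unsigned long" is 64 bits wide on LP64 platforms, and MD32_REG_T was a build-time knob with no fixed width. A minimal sketch of what a HOST_l2c-style store does with the now explicitly 32-bit word (the helper name is mine, and it assumes the big-endian byte order SHA-1 serializes with):

#include <stdint.h>

/* Store one 32-bit hash word big-endian and advance the output pointer,
 * as HOST_l2c does in the macro above.  With uint32_t the shifts touch
 * exactly the 32 bits that exist; the old "unsigned long" merely happened
 * to work because its upper half (on LP64) was never examined. */
static uint8_t *store_u32_be(uint32_t ll, uint8_t *s) {
  *s++ = (uint8_t)(ll >> 24);
  *s++ = (uint8_t)(ll >> 16);
  *s++ = (uint8_t)(ll >> 8);
  *s++ = (uint8_t)ll;
  return s;
}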
diff --git a/src/crypto/sha/sha256.c b/src/crypto/sha/sha256.c
index 8d4106e..8276bbb 100644
--- a/src/crypto/sha/sha256.c
+++ b/src/crypto/sha/sha256.c
@@ -144,10 +144,13 @@ int SHA224_Final(uint8_t *md, SHA256_CTX *ctx) {
* to truncate to amount of bytes not divisible by 4. I bet not, but if it is,
* then default: case shall be extended. For reference. Idea behind separate
 * cases for pre-defined lengths is to let the compiler decide if it's
- * appropriate to unroll small loops. */
+ * appropriate to unroll small loops.
+ *
+ * TODO(davidben): The small |md_len| case is one of the few places a low-level
+ * hash 'final' function can fail. This should never happen. */
#define HASH_MAKE_STRING(c, s) \
do { \
- unsigned long ll; \
+ uint32_t ll; \
unsigned int nn; \
switch ((c)->md_len) { \
case SHA224_DIGEST_LENGTH: \
@@ -163,8 +166,9 @@ int SHA224_Final(uint8_t *md, SHA256_CTX *ctx) {
} \
break; \
default: \
- if ((c)->md_len > SHA256_DIGEST_LENGTH) \
+ if ((c)->md_len > SHA256_DIGEST_LENGTH) { \
return 0; \
+ } \
for (nn = 0; nn < (c)->md_len / 4; nn++) { \
ll = (c)->h[nn]; \
(void) HOST_l2c(ll, (s)); \
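Condensed into a hypothetical function (names and signature are mine), the dispatch this macro implements is: SHA-224 emits the first 7 state words, SHA-256 all 8, and the default case serializes any other 4-byte-multiple length while rejecting anything past SHA256_DIGEST_LENGTH, the rare failure path the new TODO points at. The real macro keeps SHA-224 and SHA-256 as separate cases so the compiler can unroll each fixed-length loop, per the comment above.

#include <stdint.h>

static int make_string(const uint32_t h[8], unsigned md_len, uint8_t *s) {
  if (md_len > 32 /* SHA256_DIGEST_LENGTH */) {
    return 0;  /* should never happen; see the TODO above */
  }
  for (unsigned nn = 0; nn < md_len / 4; nn++) {
    uint32_t ll = h[nn];
    *s++ = (uint8_t)(ll >> 24);  /* big-endian, as HOST_l2c stores it */
    *s++ = (uint8_t)(ll >> 16);
    *s++ = (uint8_t)(ll >> 8);
    *s++ = (uint8_t)ll;
  }
  return 1;
}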
@@ -232,7 +236,7 @@ static const HASH_LONG K256[64] = {
static void sha256_block_data_order(SHA256_CTX *ctx, const void *in,
size_t num) {
- unsigned MD32_REG_T a, b, c, d, e, f, g, h, s0, s1, T1;
+ uint32_t a, b, c, d, e, f, g, h, s0, s1, T1;
HASH_LONG X[16];
int i;
const uint8_t *data = in;
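Pinning the working variables to uint32_t also pins down the arithmetic: every addition in SHA-256 is taken modulo 2^32, which uint32_t guarantees by definition, while "unsigned MD32_REG_T" could be configured wider. For reference, one round step in textbook form rather than this file's macro form (a sketch, not the file's code):

#include <stdint.h>

static uint32_t rotr32(uint32_t x, unsigned n) {
  return (x >> n) | (x << (32 - n));  /* n must stay in 1..31 */
}

/* One SHA-256 round over the state s = {a,b,c,d,e,f,g,h}, with round
 * constant Kt and message-schedule word Wt; all sums wrap mod 2^32. */
static void sha256_round(uint32_t s[8], uint32_t Kt, uint32_t Wt) {
  uint32_t S1 = rotr32(s[4], 6) ^ rotr32(s[4], 11) ^ rotr32(s[4], 25);
  uint32_t ch = (s[4] & s[5]) ^ (~s[4] & s[6]);
  uint32_t T1 = s[7] + S1 + ch + Kt + Wt;
  uint32_t S0 = rotr32(s[0], 2) ^ rotr32(s[0], 13) ^ rotr32(s[0], 22);
  uint32_t maj = (s[0] & s[1]) ^ (s[0] & s[2]) ^ (s[1] & s[2]);
  uint32_t T2 = S0 + maj;
  s[7] = s[6]; s[6] = s[5]; s[5] = s[4]; s[4] = s[3] + T1;
  s[3] = s[2]; s[2] = s[1]; s[1] = s[0]; s[0] = T1 + T2;
}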
diff --git a/src/crypto/sha/sha512.c b/src/crypto/sha/sha512.c
index 2acefb1..57c96ab 100644
--- a/src/crypto/sha/sha512.c
+++ b/src/crypto/sha/sha512.c
@@ -166,7 +166,7 @@ static
void sha512_block_data_order(SHA512_CTX *ctx, const void *in, size_t num);
-int SHA384_Final(unsigned char *md, SHA512_CTX *sha) {
+int SHA384_Final(uint8_t *md, SHA512_CTX *sha) {
return SHA512_Final(md, sha);
}
@@ -174,7 +174,7 @@ int SHA384_Update(SHA512_CTX *sha, const void *data, size_t len) {
return SHA512_Update(sha, data, len);
}
-void SHA512_Transform(SHA512_CTX *c, const unsigned char *data) {
+void SHA512_Transform(SHA512_CTX *c, const uint8_t *data) {
#ifndef SHA512_BLOCK_CAN_MANAGE_UNALIGNED_DATA
if ((size_t)data % sizeof(c->u.d[0]) != 0) {
memcpy(c->u.p, data, sizeof(c->u.p));
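The guard above handles callers that pass an unaligned block pointer: rather than risk unaligned 64-bit loads, it copies the data into the context's aligned block buffer first. In isolation the check looks like this (a sketch; |aligned| stands in for the context's u.p buffer):

#include <stdint.h>
#include <string.h>

/* Return an alignment-safe pointer to one 128-byte SHA-512 block,
 * copying into |aligned| only when |data| is misaligned for 64-bit loads. */
static const void *align_block(const uint8_t *data, uint64_t aligned[16]) {
  if ((uintptr_t)data % sizeof(uint64_t) != 0) {
    memcpy(aligned, data, 16 * sizeof(uint64_t));
    return aligned;
  }
  return data;
}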
@@ -244,7 +244,7 @@ int SHA512_Update(SHA512_CTX *c, const void *in_data, size_t len) {
return 1;
}
-int SHA512_Final(unsigned char *md, SHA512_CTX *sha) {
+int SHA512_Final(uint8_t *md, SHA512_CTX *sha) {
uint8_t *p = (uint8_t *)sha->u.p;
size_t n = sha->num;
@@ -276,7 +276,9 @@ int SHA512_Final(unsigned char *md, SHA512_CTX *sha) {
sha512_block_data_order(sha, p, 1);
- if (md == 0) {
+ if (md == NULL) {
+ /* TODO(davidben): This NULL check is absent in other low-level hash 'final'
+ * functions and is one of the few places one can fail. */
return 0;
}
@@ -312,6 +314,8 @@ int SHA512_Final(unsigned char *md, SHA512_CTX *sha) {
break;
/* ... as well as make sure md_len is not abused. */
default:
+ /* TODO(davidben): This bad |md_len| case is one of the few places a
+ * low-level hash 'final' function can fail. This should never happen. */
return 0;
}
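Both TODOs flag the same oddity: a low-level 'final' function that can return failure. With a non-NULL output buffer and an untampered context, neither path is reachable, as in this ordinary usage sketch:

#include <stdint.h>
#include <openssl/sha.h>

int sha512_of_abc(uint8_t out[SHA512_DIGEST_LENGTH]) {
  SHA512_CTX ctx;
  if (!SHA512_Init(&ctx) ||
      !SHA512_Update(&ctx, "abc", 3) ||
      !SHA512_Final(out, &ctx)) {
    return 0;  /* only md == NULL or a corrupted md_len gets here */
  }
  return 1;
}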
@@ -415,7 +419,7 @@ static uint64_t __fastcall __pull64be(const void *x) {
#ifndef PULL64
#define B(x, j) \
- (((uint64_t)(*(((const unsigned char *)(&x)) + j))) << ((7 - j) * 8))
+ (((uint64_t)(*(((const uint8_t *)(&x)) + j))) << ((7 - j) * 8))
#define PULL64(x) \
(B(x, 0) | B(x, 1) | B(x, 2) | B(x, 3) | B(x, 4) | B(x, 5) | B(x, 6) | \
B(x, 7))
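The change to the B macro is only a type spelling; the fallback still assembles a big-endian 64-bit word one byte at a time, which is correct on any host regardless of endianness or alignment. The same logic as a function (a sketch, not code from this file):

#include <stdint.h>

static uint64_t pull64_be(const void *x) {
  const uint8_t *p = (const uint8_t *)x;
  uint64_t v = 0;
  for (unsigned j = 0; j < 8; j++) {
    v |= (uint64_t)p[j] << ((7 - j) * 8);  /* byte j lands 7-j bytes up */
  }
  return v;
}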