author	Matt Mackall <mpm@selenic.com>	2008-04-29 01:03:00 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-29 08:06:24 -0700
commit	1c0ad3d492adf670e47bf0a3d65c6ba5cdee0114 (patch)
tree	b043456b0ddb74dfbff51efa57170a9c38eac729 /drivers/char/random.c
parent	ffd8d3fa5813430fe3926fe950fde23630f6b1a0 (diff)
random: make backtracking attacks harder
At each extraction, we change (poolbits / 16) + 32 bits in the pool, or 96 bits in the case of the secondary pools. Thus, a brute-force backtracking attack on the pool state is less difficult than breaking the hash. In certain cases, this difficulty may be reduced to 2^64 iterations.

Instead, hash the entire pool in one go, then feed back the whole hash (160 bits) in one go. This will make backtracking at least as hard as inverting the hash.

Signed-off-by: Matt Mackall <mpm@selenic.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
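To put numbers on the old feedback scheme, here is a quick stand-alone illustration (a sketch, not kernel code; the 4096-bit input pool and 1024-bit output pools assumed below are the sizes random.c used at the time):

#include <stdio.h>

int main(void)
{
	/* Pool sizes assumed from the random.c of this era. */
	int input_pool_bits  = 4096;	/* INPUT_POOL_WORDS * 32 */
	int output_pool_bits = 1024;	/* OUTPUT_POOL_WORDS * 32 */

	/* Old scheme: one 32-bit hash word fed back per 16-word (512-bit)
	 * block, plus one final word: (poolbits / 16) + 32 bits changed. */
	printf("input pool, old scheme:   %d bits fed back\n",
	       input_pool_bits / 16 + 32);
	printf("output pools, old scheme: %d bits fed back\n",
	       output_pool_bits / 16 + 32);

	/* New scheme: the full 160-bit SHA-1 digest is mixed back,
	 * regardless of pool size. */
	printf("any pool, new scheme:     160 bits fed back\n");
	return 0;
}

The second figure reproduces the 96 bits quoted above for the secondary pools; the 2^64 figure for certain cases is taken from the message as-is.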
Diffstat (limited to 'drivers/char/random.c')
-rw-r--r--	drivers/char/random.c	36
1 file changed, 17 insertions(+), 19 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index d125a4b..e52f64c 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -767,37 +767,35 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
 	int i;
 	__u32 extract[16], hash[5], workspace[SHA_WORKSPACE_WORDS];
 
+	/* Generate a hash across the pool, 16 words (512 bits) at a time */
 	sha_init(hash);
+	for (i = 0; i < r->poolinfo->poolwords; i += 16)
+		sha_transform(hash, (__u8 *)(r->pool + i), workspace);
+
 	/*
-	 * As we hash the pool, we mix intermediate values of
-	 * the hash back into the pool. This eliminates
-	 * backtracking attacks (where the attacker knows
-	 * the state of the pool plus the current outputs, and
-	 * attempts to find previous ouputs), unless the hash
-	 * function can be inverted.
+	 * We mix the hash back into the pool to prevent backtracking
+	 * attacks (where the attacker knows the state of the pool
+	 * plus the current outputs, and attempts to find previous
+	 * ouputs), unless the hash function can be inverted. By
+	 * mixing at least a SHA1 worth of hash data back, we make
+	 * brute-forcing the feedback as hard as brute-forcing the
+	 * hash.
 	 */
-	for (i = 0; i < r->poolinfo->poolwords; i += 16) {
-		/* hash blocks of 16 words = 512 bits */
-		sha_transform(hash, (__u8 *)(r->pool + i), workspace);
-		/* feed back portion of the resulting hash */
-		add_entropy_words(r, &hash[i % 5], 1);
-	}
+	__add_entropy_words(r, hash, 5, extract);
 
 	/*
-	 * To avoid duplicates, we atomically extract a
-	 * portion of the pool while mixing, and hash one
-	 * final time.
+	 * To avoid duplicates, we atomically extract a portion of the
+	 * pool while mixing, and hash one final time.
 	 */
-	__add_entropy_words(r, &hash[i % 5], 1, extract);
 	sha_transform(hash, (__u8 *)extract, workspace);
 	memset(extract, 0, sizeof(extract));
 	memset(workspace, 0, sizeof(workspace));
 
 	/*
-	 * In case the hash function has some recognizable
-	 * output pattern, we fold it in half.
+	 * In case the hash function has some recognizable output
+	 * pattern, we fold it in half. Thus, we always feed back
+	 * twice as much data as we output.
 	 */
-
 	hash[0] ^= hash[3];
 	hash[1] ^= hash[4];
 	hash[2] ^= rol32(hash[2], 16);
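The fold at the end of the hunk is what the new comment means by feeding back twice as much data as we output: all 160 bits of the digest are mixed back into the pool, but only 80 bits leave the function, via the memcpy(out, hash, EXTRACT_SIZE) in the surrounding kernel code (not part of this hunk; EXTRACT_SIZE is 10 bytes). A minimal stand-alone sketch of just that fold, with rol32 and an arbitrary example digest written out for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Sketch of the fold in extract_buf(): 5 hash words (160 bits) in,
 * 10 bytes (80 bits) out. */
static uint32_t rol32(uint32_t word, unsigned int shift)
{
	return (word << shift) | (word >> (32 - shift));
}

static void fold_digest(uint32_t hash[5], uint8_t out[10])
{
	hash[0] ^= hash[3];
	hash[1] ^= hash[4];
	hash[2] ^= rol32(hash[2], 16);
	memcpy(out, hash, 10);	/* EXTRACT_SIZE bytes */
}

int main(void)
{
	/* Arbitrary example digest, not taken from a real pool. */
	uint32_t hash[5] = { 0x67452301, 0xefcdab89, 0x98badcfe,
			     0x10325476, 0xc3d2e1f0 };
	uint8_t out[10];
	int i;

	fold_digest(hash, out);
	for (i = 0; i < 10; i++)
		printf("%02x", out[i]);
	printf("\n");
	return 0;
}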