author    | H. Peter Anvin <hpa@linux.intel.com> | 2012-07-27 22:26:08 -0400
committer | Theodore Ts'o <tytso@mit.edu>        | 2012-07-27 22:37:20 -0400
commit    | d2e7c96af1e54b507ae2a6a7dd2baf588417a7e5 (patch)
tree      | a60b31adede15169f7835e8a4b10151f20e8bd24 /drivers/char/random.c
parent    | d114a33387472555188f142ed8e98acdb8181c6d (diff)
random: mix in architectural randomness in extract_buf()
Mix in any architectural randomness in extract_buf() instead of
xfer_secondary_pool(). This allows us to mix in more architectural
randomness, and it also makes xfer_secondary_pool() faster, moving a
tiny bit of additional CPU overhead to the process which is extracting
the randomness.
[ Commit description modified by tytso to remove an extended
advertisement for the RDRAND instruction. ]
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Cc: DJ Johnston <dj.johnston@intel.com>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Cc: stable@vger.kernel.org
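The change below boils down to XOR-ing the CPU's hardware RNG output over the hashed pool data just before it is copied out to the caller. Here is a minimal userspace sketch of that pattern; it is not kernel code, and arch_get_random_long() is stubbed with getrandom() purely so the example is self-contained and runnable.

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <sys/random.h>
#include <sys/types.h>

#define EXTRACT_SIZE 10
#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))

/* Stand-in for the kernel's arch_get_random_long(); like RDRAND, it may fail. */
static bool arch_get_random_long(unsigned long *v)
{
	return getrandom(v, sizeof(*v), 0) == (ssize_t)sizeof(*v);
}

static void extract_and_mix(unsigned char out[EXTRACT_SIZE])
{
	union {
		unsigned int  w[5];                   /* stands in for the SHA-1 result (__u32 hash[5]) */
		unsigned long l[LONGS(EXTRACT_SIZE)];
	} hash;
	size_t i;

	memset(&hash, 0x5a, sizeof(hash));            /* pretend this is the hashed pool output */

	/* XOR in hardware randomness, stopping at the first failure. */
	for (i = 0; i < LONGS(EXTRACT_SIZE); i++) {
		unsigned long v;
		if (!arch_get_random_long(&v))
			break;
		hash.l[i] ^= v;
	}

	memcpy(out, &hash, EXTRACT_SIZE);
	memset(&hash, 0, sizeof(hash));               /* wipe the intermediate state, as the patch does */
}

int main(void)
{
	unsigned char buf[EXTRACT_SIZE];
	size_t i;

	extract_and_mix(buf);
	for (i = 0; i < sizeof(buf); i++)
		printf("%02x", buf[i]);
	putchar('\n');
	return 0;
}
```

Because the hardware output is XORed in after the pool has been hashed, a weak or untrusted hardware RNG can at worst contribute nothing; as long as its output is independent of the hash, it cannot make the extracted bytes easier to predict.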
Diffstat (limited to 'drivers/char/random.c')
-rw-r--r-- | drivers/char/random.c | 56
1 file changed, 32 insertions, 24 deletions
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 1a2dfa816041..b86eae9b77df 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -277,6 +277,8 @@
 #define SEC_XFER_SIZE 512
 #define EXTRACT_SIZE 10
 
+#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
+
 /*
  * The minimum number of bits of entropy before we wake up a read on
  * /dev/random. Should be enough to do a significant reseed.
@@ -813,11 +815,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
  */
 static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
 {
-	union {
-		__u32 tmp[OUTPUT_POOL_WORDS];
-		long hwrand[4];
-	} u;
-	int i;
+	__u32 tmp[OUTPUT_POOL_WORDS];
 
 	if (r->pull && r->entropy_count < nbytes * 8 &&
 	    r->entropy_count < r->poolinfo->POOLBITS) {
@@ -828,23 +826,17 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
 		/* pull at least as many as BYTES as wakeup BITS */
 		bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
 		/* but never more than the buffer size */
-		bytes = min_t(int, bytes, sizeof(u.tmp));
+		bytes = min_t(int, bytes, sizeof(tmp));
 
 		DEBUG_ENT("going to reseed %s with %d bits "
 			  "(%d of %d requested)\n",
 			  r->name, bytes * 8, nbytes * 8, r->entropy_count);
 
-		bytes = extract_entropy(r->pull, u.tmp, bytes,
+		bytes = extract_entropy(r->pull, tmp, bytes,
 					random_read_wakeup_thresh / 8, rsvd);
-		mix_pool_bytes(r, u.tmp, bytes, NULL);
+		mix_pool_bytes(r, tmp, bytes, NULL);
 		credit_entropy_bits(r, bytes*8);
 	}
-	kmemcheck_mark_initialized(&u.hwrand, sizeof(u.hwrand));
-	for (i = 0; i < 4; i++)
-		if (arch_get_random_long(&u.hwrand[i]))
-			break;
-	if (i)
-		mix_pool_bytes(r, &u.hwrand, sizeof(u.hwrand), 0);
 }
 
 /*
@@ -901,15 +893,19 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
 static void extract_buf(struct entropy_store *r, __u8 *out)
 {
 	int i;
-	__u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
+	union {
+		__u32 w[5];
+		unsigned long l[LONGS(EXTRACT_SIZE)];
+	} hash;
+	__u32 workspace[SHA_WORKSPACE_WORDS];
 	__u8 extract[64];
 	unsigned long flags;
 
 	/* Generate a hash across the pool, 16 words (512 bits) at a time */
-	sha_init(hash);
+	sha_init(hash.w);
 	spin_lock_irqsave(&r->lock, flags);
 	for (i = 0; i < r->poolinfo->poolwords; i += 16)
-		sha_transform(hash, (__u8 *)(r->pool + i), workspace);
+		sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
 
 	/*
 	 * We mix the hash back into the pool to prevent backtracking
@@ -920,14 +916,14 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
 	 * brute-forcing the feedback as hard as brute-forcing the
 	 * hash.
 	 */
-	__mix_pool_bytes(r, hash, sizeof(hash), extract);
+	__mix_pool_bytes(r, hash.w, sizeof(hash.w), extract);
 	spin_unlock_irqrestore(&r->lock, flags);
 
 	/*
 	 * To avoid duplicates, we atomically extract a portion of the
 	 * pool while mixing, and hash one final time.
	 */
-	sha_transform(hash, extract, workspace);
+	sha_transform(hash.w, extract, workspace);
 	memset(extract, 0, sizeof(extract));
 	memset(workspace, 0, sizeof(workspace));
 
@@ -936,11 +932,23 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
 	 * pattern, we fold it in half. Thus, we always feed back
 	 * twice as much data as we output.
 	 */
-	hash[0] ^= hash[3];
-	hash[1] ^= hash[4];
-	hash[2] ^= rol32(hash[2], 16);
-	memcpy(out, hash, EXTRACT_SIZE);
-	memset(hash, 0, sizeof(hash));
+	hash.w[0] ^= hash.w[3];
+	hash.w[1] ^= hash.w[4];
+	hash.w[2] ^= rol32(hash.w[2], 16);
+
+	/*
+	 * If we have a architectural hardware random number
+	 * generator, mix that in, too.
+	 */
+	for (i = 0; i < LONGS(EXTRACT_SIZE); i++) {
+		unsigned long v;
+		if (!arch_get_random_long(&v))
+			break;
+		hash.l[i] ^= v;
+	}
+
+	memcpy(out, &hash, EXTRACT_SIZE);
+	memset(&hash, 0, sizeof(hash));
 }
 
 static ssize_t extract_entropy(struct entropy_store *r, void *buf,
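For reference, the number of hardware words the new loop requests per extracted block is LONGS(EXTRACT_SIZE), which depends on the width of unsigned long. The tiny userspace check below (again not kernel code, just the macro copied out of the patch) prints what it evaluates to on the build host.

```c
#include <stdio.h>

#define EXTRACT_SIZE 10
#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))

int main(void)
{
	/*
	 * LP64 (8-byte long):  LONGS(10) == (10 + 7) / 8 == 2, i.e. up to 16
	 * hardware bytes XORed per block.
	 * ILP32 (4-byte long): LONGS(10) == (10 + 3) / 4 == 3, i.e. up to 12
	 * hardware bytes XORed per block.
	 * Either way the XORed span covers the 10 bytes that extract_buf()
	 * copies to the caller, provided arch_get_random_long() succeeds
	 * for every iteration.
	 */
	printf("sizeof(unsigned long) = %zu\n", sizeof(unsigned long));
	printf("LONGS(EXTRACT_SIZE)   = %zu\n", (size_t)LONGS(EXTRACT_SIZE));
	return 0;
}
```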