author     Linus Torvalds <torvalds@linux-foundation.org>  2017-06-11 15:02:01 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-06-11 15:02:01 -0400
commit     5ad9345d2321eb1442794098506d136d01cf8345 (patch)
tree       500c5274cf0433c7266c901727b722315508c358
parent     5e38b72ac1b0000f1d7bd2b62fba7ee9053f5b94 (diff)
parent     b169c13de473a85b3c859bb36216a4cb5f00a54a (diff)
Merge tag 'random_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random
Pull randomness fixes from Ted Ts'o:
"Improve performance by using a lockless update mechanism suggested by
Linus, and make sure we refresh the per-CPU entropy returned by get_random_*
as soon as the CRNG is initialized"
* tag 'random_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random:
random: invalidate batched entropy after crng init
random: use lockless method of accessing and updating f->reg_idx
-rw-r--r--  drivers/char/random.c | 49
1 file changed, 43 insertions(+), 6 deletions(-)
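For context: the code touched below sits behind the kernel-internal helpers get_random_u32() and get_random_u64(), which are served from per-CPU batches rather than the full extraction path. A minimal, purely illustrative sketch of a hypothetical in-kernel caller (the surrounding function is invented; only the two helpers and <linux/random.h> come from the tree):

#include <linux/random.h>

/* Hypothetical caller, for illustration only. Both helpers draw from the
 * per-CPU batches that this merge teaches the CRNG code to invalidate
 * once it is properly seeded. */
static u64 make_flow_cookie(void)
{
	return get_random_u64();
}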
diff --git a/drivers/char/random.c b/drivers/char/random.c
index a561f0c2f428..e870f329db88 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1,6 +1,9 @@
 /*
  * random.c -- A strong random number generator
  *
+ * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
+ * Rights Reserved.
+ *
  * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
  *
  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
@@ -762,6 +765,8 @@ static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
 static struct crng_state **crng_node_pool __read_mostly;
 #endif
 
+static void invalidate_batched_entropy(void);
+
 static void crng_initialize(struct crng_state *crng)
 {
 	int i;
@@ -799,6 +804,7 @@ static int crng_fast_load(const char *cp, size_t len)
 		cp++; crng_init_cnt++; len--;
 	}
 	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
+		invalidate_batched_entropy();
 		crng_init = 1;
 		wake_up_interruptible(&crng_init_wait);
 		pr_notice("random: fast init done\n");
@@ -836,6 +842,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
 	memzero_explicit(&buf, sizeof(buf));
 	crng->init_time = jiffies;
 	if (crng == &primary_crng && crng_init < 2) {
+		invalidate_batched_entropy();
 		crng_init = 2;
 		process_random_ready_list();
 		wake_up_interruptible(&crng_init_wait);
@@ -1097,15 +1104,15 @@ static void add_interrupt_bench(cycles_t start)
 static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
 {
 	__u32 *ptr = (__u32 *) regs;
-	unsigned long flags;
+	unsigned int idx;
 
 	if (regs == NULL)
 		return 0;
-	local_irq_save(flags);
-	if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32))
-		f->reg_idx = 0;
-	ptr += f->reg_idx++;
-	local_irq_restore(flags);
+	idx = READ_ONCE(f->reg_idx);
+	if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
+		idx = 0;
+	ptr += idx++;
+	WRITE_ONCE(f->reg_idx, idx);
 	return *ptr;
 }
 
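The get_reg() hunk above drops the irq-save/restore critical section in favor of plain loads and stores through READ_ONCE/WRITE_ONCE; a racy or lost update of reg_idx only changes which saved register gets sampled next, which is harmless for entropy collection. A rough user-space analog of the same idiom, using C11 relaxed atomics in place of the kernel macros (the struct, array, and function names here are invented for illustration):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NREGS 16

struct fast_pool_model {
	_Atomic unsigned int reg_idx;   /* stands in for f->reg_idx */
};

/* Round-robin sample of one "register" per call. Concurrent callers may
 * occasionally reuse or skip an index; for entropy sampling that is fine,
 * which is why no lock or irq masking is needed. */
static uint32_t get_reg_model(struct fast_pool_model *f, const uint32_t *regs)
{
	unsigned int idx = atomic_load_explicit(&f->reg_idx,
						memory_order_relaxed);
	if (idx >= NREGS)
		idx = 0;
	uint32_t val = regs[idx++];
	atomic_store_explicit(&f->reg_idx, idx, memory_order_relaxed);
	return val;
}

int main(void)
{
	struct fast_pool_model f = { 0 };
	uint32_t regs[NREGS] = { 11, 22, 33, 44 };

	for (int i = 0; i < 5; i++)
		printf("%u\n", get_reg_model(&f, regs));
	return 0;
}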
@@ -2023,6 +2030,7 @@ struct batched_entropy {
 	};
 	unsigned int position;
 };
+static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
 
 /*
  * Get a random word for internal kernel use only. The quality of the random
@@ -2033,6 +2041,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
 u64 get_random_u64(void)
 {
 	u64 ret;
+	bool use_lock = crng_init < 2;
+	unsigned long flags;
 	struct batched_entropy *batch;
 
 #if BITS_PER_LONG == 64
@@ -2045,11 +2055,15 @@ u64 get_random_u64(void)
 #endif
 
 	batch = &get_cpu_var(batched_entropy_u64);
+	if (use_lock)
+		read_lock_irqsave(&batched_entropy_reset_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
 		extract_crng((u8 *)batch->entropy_u64);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u64[batch->position++];
+	if (use_lock)
+		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
 	put_cpu_var(batched_entropy_u64);
 	return ret;
 }
@@ -2059,22 +2073,45 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
 u32 get_random_u32(void)
 {
 	u32 ret;
+	bool use_lock = crng_init < 2;
+	unsigned long flags;
 	struct batched_entropy *batch;
 
 	if (arch_get_random_int(&ret))
 		return ret;
 
 	batch = &get_cpu_var(batched_entropy_u32);
+	if (use_lock)
+		read_lock_irqsave(&batched_entropy_reset_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
 		extract_crng((u8 *)batch->entropy_u32);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u32[batch->position++];
+	if (use_lock)
+		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
 	put_cpu_var(batched_entropy_u32);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u32);
 
+/* It's important to invalidate all potential batched entropy that might
+ * be stored before the crng is initialized, which we can do lazily by
+ * simply resetting the counter to zero so that it's re-extracted on the
+ * next usage. */
+static void invalidate_batched_entropy(void)
+{
+	int cpu;
+	unsigned long flags;
+
+	write_lock_irqsave(&batched_entropy_reset_lock, flags);
+	for_each_possible_cpu (cpu) {
+		per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
+		per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
+	}
+	write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+}
+
 /**
  * randomize_page - Generate a random, page aligned address
  * @start:	The smallest acceptable address the caller will take.
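Taken together, the batched-entropy hunks follow one pattern: consumers take the read lock around their per-CPU batch only while the CRNG is still uninitialized (use_lock = crng_init < 2), and the one-time invalidation resets every batch's position under the write lock so words extracted before the CRNG was seeded are never handed out afterwards. Below is a compressed user-space model of that pattern, strictly an illustration: a pthread rwlock stands in for the kernel rwlock_t, a plain array stands in for the per-CPU variables, and fill_from_crng() is a made-up placeholder for extract_crng().

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define NCPUS 4
#define BATCH_WORDS 8

struct batch {
	uint64_t words[BATCH_WORDS];
	unsigned int position;
};

static struct batch batches[NCPUS];             /* stands in for the per-CPU vars */
static pthread_rwlock_t reset_lock = PTHREAD_RWLOCK_INITIALIZER;
static bool crng_ready;                         /* stands in for crng_init >= 2 */

/* Placeholder for extract_crng(): refill a batch with "fresh" output. */
static void fill_from_crng(struct batch *b)
{
	memset(b->words, 0xa5, sizeof(b->words));   /* illustrative only */
}

/* Reader side, mirroring get_random_u64(): the read lock is taken only while
 * the CRNG is not yet fully initialized, so the common post-init path stays
 * lock-free. */
static uint64_t get_random_u64_model(int cpu)
{
	bool use_lock = !crng_ready;
	struct batch *b = &batches[cpu];
	uint64_t ret;

	if (use_lock)
		pthread_rwlock_rdlock(&reset_lock);
	if (b->position % BATCH_WORDS == 0) {
		fill_from_crng(b);
		b->position = 0;
	}
	ret = b->words[b->position++];
	if (use_lock)
		pthread_rwlock_unlock(&reset_lock);
	return ret;
}

/* Writer side, mirroring invalidate_batched_entropy(): resetting position to 0
 * lazily forces every batch to be re-extracted on its next use. */
static void invalidate_batched_entropy_model(void)
{
	pthread_rwlock_wrlock(&reset_lock);
	for (int cpu = 0; cpu < NCPUS; cpu++)
		batches[cpu].position = 0;
	pthread_rwlock_unlock(&reset_lock);
}

int main(void)
{
	(void)get_random_u64_model(0);          /* pre-init: locked path */
	crng_ready = true;                      /* CRNG becomes fully seeded */
	invalidate_batched_entropy_model();     /* flush anything batched before now */
	(void)get_random_u64_model(0);          /* post-init: lock-free path */
	return 0;
}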