author    Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2019-04-20 00:09:51 -0400
committer Theodore Ts'o <tytso@mit.edu>                      2019-04-20 00:09:56 -0400
commit    b7d5dc21072cda7124d13eae2aefb7343ef94197 (patch)
tree      09d1d9c39c919133652ffcc8509efbf7c3c2cb7d /drivers/char/random.c
parent    92e507d216139b356a375afbda2824e85235e748 (diff)
random: add a spinlock_t to struct batched_entropy
The per-CPU variable batched_entropy_uXX is protected by get_cpu_var().
This boils down to a preempt_disable(), which only ensures that the
variable is accessed from the local CPU. It does not protect against
users on the same CPU from another context, e.g. an interrupt handler.
It is possible that a preemptible context reads slot 0, an interrupt
occurs, and the same slot is read again before the position is
advanced, handing out the same value twice.
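To make the window concrete, here is an illustrative interleaving (a
sketch, not the kernel code verbatim: the single statement
ret = batch->entropy_u64[batch->position++]; is expanded so the racy
load and store are visible):

	/* process context, preemption disabled by get_cpu_var(): */
	batch = &get_cpu_var(batched_entropy_u64);
	ret = batch->entropy_u64[batch->position];	/* reads slot 0 */
		/* <interrupt> the handler calls get_random_u64(), runs
		 * the same sequence on the same per-CPU batch and also
		 * reads slot 0 -- the value is handed out twice */
	batch->position++;			/* update lands too late */
	put_cpu_var(batched_entropy_u64);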
This scenario is confirmed by lockdep if we add a spinlock:
| ================================
| WARNING: inconsistent lock state
| 5.1.0-rc3+ #42 Not tainted
| --------------------------------
| inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage.
| ksoftirqd/9/56 [HC0[0]:SC1[1]:HE0:SE0] takes:
| (____ptrval____) (batched_entropy_u32.lock){+.?.}, at: get_random_u32+0x3e/0xe0
| {SOFTIRQ-ON-W} state was registered at:
| _raw_spin_lock+0x2a/0x40
| get_random_u32+0x3e/0xe0
| new_slab+0x15c/0x7b0
| ___slab_alloc+0x492/0x620
| __slab_alloc.isra.73+0x53/0xa0
| kmem_cache_alloc_node+0xaf/0x2a0
| copy_process.part.41+0x1e1/0x2370
| _do_fork+0xdb/0x6d0
| kernel_thread+0x20/0x30
| kthreadd+0x1ba/0x220
| ret_from_fork+0x3a/0x50
…
| other info that might help us debug this:
| Possible unsafe locking scenario:
|
|        CPU0
|        ----
|   lock(batched_entropy_u32.lock);
|   <Interrupt>
|     lock(batched_entropy_u32.lock);
|
|   *** DEADLOCK ***
|
| stack backtrace:
| Call Trace:
…
| kmem_cache_alloc_trace+0x20e/0x270
| ipmi_alloc_recv_msg+0x16/0x40
…
| __do_softirq+0xec/0x48d
| run_ksoftirqd+0x37/0x60
| smpboot_thread_fn+0x191/0x290
| kthread+0xfe/0x130
| ret_from_fork+0x3a/0x50
Add a spinlock_t to the batched_entropy data structure and acquire the
lock while accessing the batch. Acquire the lock with interrupts
disabled, because these functions may be used from interrupt context.
Remove the batched_entropy_reset_lock. Now that each batch carries its
own lock, the data structure can also be accessed from a remote CPU.
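Condensed from the diff below, the resulting pattern is (only the u64
variant is shown; the u32 path is identical):

	/* fast path: per-CPU batch, lock taken with interrupts disabled */
	batch = raw_cpu_ptr(&batched_entropy_u64);
	spin_lock_irqsave(&batch->batch_lock, flags);
	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
		extract_crng((u8 *)batch->entropy_u64);
		batch->position = 0;
	}
	ret = batch->entropy_u64[batch->position++];
	spin_unlock_irqrestore(&batch->batch_lock, flags);

	/* reset path: the per-batch lock makes remote-CPU access safe */
	for_each_possible_cpu (cpu) {
		batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
		spin_lock_irqsave(&batched_entropy->batch_lock, flags);
		batched_entropy->position = 0;
		spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
	}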
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Diffstat (limited to 'drivers/char/random.c')
-rw-r--r--	drivers/char/random.c	52
1 file changed, 27 insertions(+), 25 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 587df86c1661..a42b3d764da8 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -2282,8 +2282,8 @@ struct batched_entropy {
 		u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
 	};
 	unsigned int position;
+	spinlock_t batch_lock;
 };
-static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
 
 /*
  * Get a random word for internal kernel use only. The quality of the random
@@ -2293,12 +2293,14 @@ static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_
  * wait_for_random_bytes() should be called and return 0 at least once
  * at any point prior.
  */
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
+	.batch_lock	= __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
+};
+
 u64 get_random_u64(void)
 {
 	u64 ret;
-	bool use_lock;
-	unsigned long flags = 0;
+	unsigned long flags;
 	struct batched_entropy *batch;
 	static void *previous;
 
@@ -2313,28 +2315,25 @@ u64 get_random_u64(void)
 
 	warn_unseeded_randomness(&previous);
 
-	use_lock = READ_ONCE(crng_init) < 2;
-	batch = &get_cpu_var(batched_entropy_u64);
-	if (use_lock)
-		read_lock_irqsave(&batched_entropy_reset_lock, flags);
+	batch = raw_cpu_ptr(&batched_entropy_u64);
+	spin_lock_irqsave(&batch->batch_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
 		extract_crng((u8 *)batch->entropy_u64);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u64[batch->position++];
-	if (use_lock)
-		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
-	put_cpu_var(batched_entropy_u64);
+	spin_unlock_irqrestore(&batch->batch_lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u64);
 
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
+	.batch_lock	= __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
+};
 u32 get_random_u32(void)
 {
 	u32 ret;
-	bool use_lock;
-	unsigned long flags = 0;
+	unsigned long flags;
 	struct batched_entropy *batch;
 	static void *previous;
 
@@ -2343,18 +2342,14 @@ u32 get_random_u32(void)
 
 	warn_unseeded_randomness(&previous);
 
-	use_lock = READ_ONCE(crng_init) < 2;
-	batch = &get_cpu_var(batched_entropy_u32);
-	if (use_lock)
-		read_lock_irqsave(&batched_entropy_reset_lock, flags);
+	batch = raw_cpu_ptr(&batched_entropy_u32);
+	spin_lock_irqsave(&batch->batch_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
 		extract_crng((u8 *)batch->entropy_u32);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u32[batch->position++];
-	if (use_lock)
-		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
-	put_cpu_var(batched_entropy_u32);
+	spin_unlock_irqrestore(&batch->batch_lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u32);
@@ -2368,12 +2363,19 @@ static void invalidate_batched_entropy(void)
 	int cpu;
 	unsigned long flags;
 
-	write_lock_irqsave(&batched_entropy_reset_lock, flags);
 	for_each_possible_cpu (cpu) {
-		per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
-		per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
+		struct batched_entropy *batched_entropy;
+
+		batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
+		spin_lock_irqsave(&batched_entropy->batch_lock, flags);
+		batched_entropy->position = 0;
+		spin_unlock(&batched_entropy->batch_lock);
+
+		batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
+		spin_lock(&batched_entropy->batch_lock);
+		batched_entropy->position = 0;
+		spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
 	}
-	write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
 }
 
 /**