Diffstat (limited to 'drivers')

 -rw-r--r--   drivers/char/random.c | 199
 1 file changed, 136 insertions, 63 deletions
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 38c6d1af6d1c..a42b3d764da8 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -101,15 +101,13 @@
  * Exported interfaces ---- output
  * ===============================
  *
- * There are three exported interfaces; the first is one designed to
- * be used from within the kernel:
+ * There are four exported interfaces; two for use within the kernel,
+ * and two for use from userspace.
  *
- *	void get_random_bytes(void *buf, int nbytes);
- *
- * This interface will return the requested number of random bytes,
- * and place it in the requested buffer.
+ * Exported interfaces ---- userspace output
+ * -----------------------------------------
  *
- * The two other interfaces are two character devices /dev/random and
+ * The userspace interfaces are two character devices /dev/random and
  * /dev/urandom.  /dev/random is suitable for use when very high
  * quality randomness is desired (for example, for key generation or
  * one-time pads), as it will only return a maximum of the number of
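A minimal userspace sketch of the two character devices described above (illustrative only; error handling is trimmed and the wrapper name is invented for this example):

	/* Illustrative reader for /dev/urandom, which never blocks. */
	#include <fcntl.h>
	#include <unistd.h>

	int read_urandom(void *buf, size_t len)
	{
		int fd = open("/dev/urandom", O_RDONLY);
		ssize_t n;

		if (fd < 0)
			return -1;
		n = read(fd, buf, len);
		close(fd);
		return n == (ssize_t)len ? 0 : -1;
	}

Reading /dev/random the same way may block (or return a short read) once the estimated entropy is exhausted, which is exactly the distinction the comment draws.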
@@ -122,6 +120,77 @@
  * this will result in random numbers that are merely cryptographically
  * strong.  For many applications, however, this is acceptable.
  *
+ * Exported interfaces ---- kernel output
+ * --------------------------------------
+ *
+ * The primary kernel interface is
+ *
+ *	void get_random_bytes(void *buf, int nbytes);
+ *
+ * This interface will return the requested number of random bytes,
+ * and place it in the requested buffer.  This is equivalent to a
+ * read from /dev/urandom.
+ *
+ * For less critical applications, there are the functions:
+ *
+ *	u32 get_random_u32()
+ *	u64 get_random_u64()
+ *	unsigned int get_random_int()
+ *	unsigned long get_random_long()
+ *
+ * These are produced by a cryptographic RNG seeded from get_random_bytes,
+ * and so do not deplete the entropy pool as much.  These are recommended
+ * for most in-kernel operations *if the result is going to be stored in
+ * the kernel*.
+ *
+ * Specifically, the get_random_int() family do not attempt to do
+ * "anti-backtracking".  If you capture the state of the kernel (e.g.
+ * by snapshotting the VM), you can figure out previous get_random_int()
+ * return values.  But if the value is stored in the kernel anyway,
+ * this is not a problem.
+ *
+ * It *is* safe to expose get_random_int() output to attackers (e.g. as
+ * network cookies); given outputs 1..n, it's not feasible to predict
+ * outputs 0 or n+1.  The only concern is an attacker who breaks into
+ * the kernel later; the get_random_int() engine is not reseeded as
+ * often as the get_random_bytes() one.
+ *
+ * get_random_bytes() is needed for keys that need to stay secret after
+ * they are erased from the kernel.  For example, any key that will
+ * be wrapped and stored encrypted.  And session encryption keys: we'd
+ * like to know that after the session is closed and the keys erased,
+ * the plaintext is unrecoverable to someone who recorded the ciphertext.
+ *
+ * But for network ports/cookies, stack canaries, PRNG seeds, address
+ * space layout randomization, session *authentication* keys, or other
+ * applications where the sensitive data is stored in the kernel in
+ * plaintext for as long as it's sensitive, the get_random_int() family
+ * is just fine.
+ *
+ * Consider ASLR.  We want to keep the address space secret from an
+ * outside attacker while the process is running, but once the address
+ * space is torn down, it's of no use to an attacker any more.  And it's
+ * stored in kernel data structures as long as it's alive, so worrying
+ * about an attacker's ability to extrapolate it from the get_random_int()
+ * CRNG is silly.
+ *
+ * Even some cryptographic keys are safe to generate with get_random_int().
+ * In particular, keys for SipHash are generally fine.  Here, knowledge
+ * of the key authorizes you to do something to a kernel object (inject
+ * packets to a network connection, or flood a hash table), and the
+ * key is stored with the object being protected.  Once it goes away,
+ * we no longer care if anyone knows the key.
+ *
+ * prandom_u32()
+ * -------------
+ *
+ * For even weaker applications, see the pseudorandom generator
+ * prandom_u32(), prandom_max(), and prandom_bytes().  If the random
+ * numbers aren't security-critical at all, these are *far* cheaper.
+ * Useful for self-tests, random error simulation, randomized backoffs,
+ * and any other application where you trust that nobody is trying to
+ * maliciously mess with you by guessing the "random" numbers.
+ *
  * Exported interfaces ---- input
  * ==============================
  *
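The new comment block is essentially usage guidance; a minimal in-kernel sketch of the distinction it draws (the wrapper functions are invented for illustration, the linux/random.h APIs are the ones documented above):

	#include <linux/random.h>

	/* Exposed on the wire but kernel-resident for its whole life:
	 * the cheaper batched CRNG interface is the documented choice. */
	static u32 example_cookie_salt(void)
	{
		return get_random_u32();
	}

	/* Must stay secret even after being erased from the kernel:
	 * use the anti-backtracking interface. */
	static void example_session_key(u8 *key, int nbytes)
	{
		get_random_bytes(key, nbytes);
	}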
@@ -295,7 +364,7 @@
  * To allow fractional bits to be tracked, the entropy_count field is
  * denominated in units of 1/8th bits.
  *
- * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
+ * 2*(ENTROPY_SHIFT + poolbitshift) must be <= 31, or the multiply in
  * credit_entropy_bits() needs to be 64 bits wide.
  */
 #define ENTROPY_SHIFT 3
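As a quick check of the reworded constraint, take the largest pool in poolinfo_table (128 words = 4096 bits, so poolbitshift = ilog2(128) + 5 = 12): 2 * (ENTROPY_SHIFT + poolbitshift) = 2 * (3 + 12) = 30 <= 31, so the fixed-point multiply in credit_entropy_bits() still fits in 32 bits.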
@@ -359,9 +428,9 @@ static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
  * polynomial which improves the resulting TGFSR polynomial to be
  * irreducible, which we have made here.
  */
-static struct poolinfo {
-	int poolbitshift, poolwords, poolbytes, poolbits, poolfracbits;
-#define S(x) ilog2(x)+5, (x), (x)*4, (x)*32, (x) << (ENTROPY_SHIFT+5)
+static const struct poolinfo {
+	int poolbitshift, poolwords, poolbytes, poolfracbits;
+#define S(x) ilog2(x)+5, (x), (x)*4, (x) << (ENTROPY_SHIFT+5)
 	int tap1, tap2, tap3, tap4, tap5;
 } poolinfo_table[] = {
 	/* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
@@ -415,7 +484,7 @@ struct crng_state {
 	spinlock_t lock;
 };
 
-struct crng_state primary_crng = {
+static struct crng_state primary_crng = {
 	.lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock),
 };
 
@@ -470,7 +539,6 @@ struct entropy_store {
 	unsigned short add_ptr;
 	unsigned short input_rotate;
 	int entropy_count;
-	int entropy_total;
 	unsigned int initialized:1;
 	unsigned int last_data_init:1;
 	__u8 last_data[EXTRACT_SIZE];
@@ -643,7 +711,7 @@ static void process_random_ready_list(void)
  */
 static void credit_entropy_bits(struct entropy_store *r, int nbits)
 {
-	int entropy_count, orig;
+	int entropy_count, orig, has_initialized = 0;
 	const int pool_size = r->poolinfo->poolfracbits;
 	int nfrac = nbits << ENTROPY_SHIFT;
 
@@ -698,23 +766,25 @@ retry:
 		entropy_count = 0;
 	} else if (entropy_count > pool_size)
 		entropy_count = pool_size;
+	if ((r == &blocking_pool) && !r->initialized &&
+	    (entropy_count >> ENTROPY_SHIFT) > 128)
+		has_initialized = 1;
 	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
 		goto retry;
 
-	r->entropy_total += nbits;
-	if (!r->initialized && r->entropy_total > 128) {
+	if (has_initialized)
 		r->initialized = 1;
-		r->entropy_total = 0;
-	}
 
 	trace_credit_entropy_bits(r->name, nbits,
-				  entropy_count >> ENTROPY_SHIFT,
-				  r->entropy_total, _RET_IP_);
+				  entropy_count >> ENTROPY_SHIFT, _RET_IP_);
 
 	if (r == &input_pool) {
 		int entropy_bits = entropy_count >> ENTROPY_SHIFT;
+		struct entropy_store *other = &blocking_pool;
 
-		if (crng_init < 2 && entropy_bits >= 128) {
+		if (crng_init < 2) {
+			if (entropy_bits < 128)
+				return;
 			crng_reseed(&primary_crng, r);
 			entropy_bits = r->entropy_count >> ENTROPY_SHIFT;
 		}
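The hunk above leans on the lock-free cmpxchg() retry idiom to update entropy_count; a standalone sketch of that pattern (the function and its 'cap' parameter are hypothetical):

	/* Sketch of the cmpxchg() retry idiom used by credit_entropy_bits():
	 * if another CPU changed the counter under us, recompute the new
	 * value from the fresh reading and try again. */
	static void add_capped(int *counter, int delta, int cap)
	{
		int orig, val;

	retry:
		orig = READ_ONCE(*counter);
		val = orig + delta;
		if (val > cap)
			val = cap;
		if (cmpxchg(counter, orig, val) != orig)
			goto retry;
	}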
@@ -725,20 +795,14 @@ retry:
 			wake_up_interruptible(&random_read_wait);
 			kill_fasync(&fasync, SIGIO, POLL_IN);
 		}
-		/* If the input pool is getting full, send some
-		 * entropy to the blocking pool until it is 75% full.
+		/* If the input pool is getting full, and the blocking
+		 * pool has room, send some entropy to the blocking
+		 * pool.
 		 */
-		if (entropy_bits > random_write_wakeup_bits &&
-		    r->initialized &&
-		    r->entropy_total >= 2*random_read_wakeup_bits) {
-			struct entropy_store *other = &blocking_pool;
-
-			if (other->entropy_count <=
-			    3 * other->poolinfo->poolfracbits / 4) {
-				schedule_work(&other->push_work);
-				r->entropy_total = 0;
-			}
-		}
+		if (!work_pending(&other->push_work) &&
+		    (ENTROPY_BITS(r) > 6 * r->poolinfo->poolbytes) &&
+		    (ENTROPY_BITS(other) <= 6 * other->poolinfo->poolbytes))
+			schedule_work(&other->push_work);
 	}
 }
 
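A worked reading of the new threshold: ENTROPY_BITS() is denominated in bits while poolbytes is bytes, so a full pool holds 8 * poolbytes bits and 6 * poolbytes bits is 6/8 = 75% of capacity. The rewritten test therefore pushes entropy once the input pool is more than three-quarters full and the blocking pool is at most three-quarters full, matching the intent of the old "3 * poolfracbits / 4" arithmetic it replaces.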
@@ -777,6 +841,7 @@ static struct crng_state **crng_node_pool __read_mostly;
 #endif
 
 static void invalidate_batched_entropy(void);
+static void numa_crng_init(void);
 
 static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
 static int __init parse_trust_cpu(char *arg)
@@ -805,7 +870,9 @@ static void crng_initialize(struct crng_state *crng)
 		}
 		crng->state[i] ^= rv;
 	}
-	if (trust_cpu && arch_init) {
+	if (trust_cpu && arch_init && crng == &primary_crng) {
+		invalidate_batched_entropy();
+		numa_crng_init();
 		crng_init = 2;
 		pr_notice("random: crng done (trusting CPU's manufacturer)\n");
 	}
@@ -1553,6 +1620,11 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
 	int large_request = (nbytes > 256);
 
 	trace_extract_entropy_user(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
+	if (!r->initialized && r->pull) {
+		xfer_secondary_pool(r, ENTROPY_BITS(r->pull)/8);
+		if (!r->initialized)
+			return 0;
+	}
 	xfer_secondary_pool(r, nbytes);
 	nbytes = account(r, nbytes, 0, 0);
 
@@ -1783,7 +1855,7 @@ EXPORT_SYMBOL(get_random_bytes_arch);
  * data into the pool to prepare it for use.  The pool is not cleared
  * as that can only decrease the entropy in the pool.
  */
-static void init_std_data(struct entropy_store *r)
+static void __init init_std_data(struct entropy_store *r)
 {
 	int i;
 	ktime_t now = ktime_get_real();
@@ -1810,7 +1882,7 @@ static void init_std_data(struct entropy_store *r)
  * take care not to overwrite the precious per platform data
  * we were given.
  */
-static int rand_initialize(void)
+int __init rand_initialize(void)
 {
 	init_std_data(&input_pool);
 	init_std_data(&blocking_pool);
@@ -1822,7 +1894,6 @@ static int rand_initialize(void)
 	}
 	return 0;
 }
-early_initcall(rand_initialize);
 
 #ifdef CONFIG_BLOCK
 void rand_initialize_disk(struct gendisk *disk)
@@ -2211,8 +2282,8 @@ struct batched_entropy {
 		u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
 	};
 	unsigned int position;
+	spinlock_t batch_lock;
 };
-static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
 
 /*
  * Get a random word for internal kernel use only. The quality of the random
@@ -2222,12 +2293,14 @@ static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_
  * wait_for_random_bytes() should be called and return 0 at least once
  * at any point prior.
  */
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
+	.batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
+};
+
 u64 get_random_u64(void)
 {
 	u64 ret;
-	bool use_lock;
-	unsigned long flags = 0;
+	unsigned long flags;
 	struct batched_entropy *batch;
 	static void *previous;
 
@@ -2242,28 +2315,25 @@ u64 get_random_u64(void)
 
 	warn_unseeded_randomness(&previous);
 
-	use_lock = READ_ONCE(crng_init) < 2;
-	batch = &get_cpu_var(batched_entropy_u64);
-	if (use_lock)
-		read_lock_irqsave(&batched_entropy_reset_lock, flags);
+	batch = raw_cpu_ptr(&batched_entropy_u64);
+	spin_lock_irqsave(&batch->batch_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
 		extract_crng((u8 *)batch->entropy_u64);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u64[batch->position++];
-	if (use_lock)
-		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
-	put_cpu_var(batched_entropy_u64);
+	spin_unlock_irqrestore(&batch->batch_lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u64);
 
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
+	.batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
+};
 u32 get_random_u32(void)
 {
 	u32 ret;
-	bool use_lock;
-	unsigned long flags = 0;
+	unsigned long flags;
 	struct batched_entropy *batch;
 	static void *previous;
 
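As the comment retained above the per-CPU batches notes, batched output is only as good as the CRNG's seeding; a minimal sketch of the calling pattern for security-sensitive users (the enclosing function is invented for illustration):

	/* Wait for the CRNG to be seeded before trusting batched output. */
	static int example_secure_token(u64 *token)
	{
		int ret = wait_for_random_bytes();	/* may sleep */

		if (ret)
			return ret;
		*token = get_random_u64();
		return 0;
	}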
@@ -2272,18 +2342,14 @@ u32 get_random_u32(void)
 
 	warn_unseeded_randomness(&previous);
 
-	use_lock = READ_ONCE(crng_init) < 2;
-	batch = &get_cpu_var(batched_entropy_u32);
-	if (use_lock)
-		read_lock_irqsave(&batched_entropy_reset_lock, flags);
+	batch = raw_cpu_ptr(&batched_entropy_u32);
+	spin_lock_irqsave(&batch->batch_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
 		extract_crng((u8 *)batch->entropy_u32);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u32[batch->position++];
-	if (use_lock)
-		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
-	put_cpu_var(batched_entropy_u32);
+	spin_unlock_irqrestore(&batch->batch_lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u32);
@@ -2297,12 +2363,19 @@ static void invalidate_batched_entropy(void)
 	int cpu;
 	unsigned long flags;
 
-	write_lock_irqsave(&batched_entropy_reset_lock, flags);
 	for_each_possible_cpu (cpu) {
-		per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
-		per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
+		struct batched_entropy *batched_entropy;
+
+		batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
+		spin_lock_irqsave(&batched_entropy->batch_lock, flags);
+		batched_entropy->position = 0;
+		spin_unlock(&batched_entropy->batch_lock);
+
+		batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
+		spin_lock(&batched_entropy->batch_lock);
+		batched_entropy->position = 0;
+		spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
 	}
-	write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
 }
 
 /**
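One detail of the rewritten loop worth noting: interrupt state is saved and interrupts disabled once per CPU iteration (spin_lock_irqsave on the u32 lock) and restored only after the u64 batch is done (spin_unlock_irqrestore), with plain spin_lock/spin_unlock in between. A standalone sketch of that idiom, with hypothetical locks a and b:

	/* Take two locks under one irqsave/irqrestore pair; interrupts
	 * stay disabled across both critical sections. */
	static void update_both(spinlock_t *a, spinlock_t *b)
	{
		unsigned long flags;

		spin_lock_irqsave(a, flags);
		/* ... first critical section ... */
		spin_unlock(a);			/* irqs remain disabled */

		spin_lock(b);
		/* ... second critical section ... */
		spin_unlock_irqrestore(b, flags);
	}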
