author	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-06 11:16:24 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-06 11:16:24 -0400
commit	f4f142ed4ef835709c7e6d12eaca10d190bcebed
tree	a0bc6850239fe3551bb67f5707bfef153ac437fe /drivers/char
parent	bb2cbf5e9367d8598fecd0c48dead69560750223
parent	e02b876597777ab26288dd2611a97b597d14d661
Merge tag 'random_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random
Pull randomness updates from Ted Ts'o:
 "Cleanups and bug fixes to /dev/random, add a new getrandom(2) system
  call, which is a superset of OpenBSD's getentropy(2) call, for use
  with userspace crypto libraries such as LibreSSL.

  Also add the ability to have a kernel thread to pull entropy from
  hardware rng devices into /dev/random"

* tag 'random_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random:
  hwrng: Pass entropy to add_hwgenerator_randomness() in bits, not bytes
  random: limit the contribution of the hw rng to at most half
  random: introduce getrandom(2) system call
  hw_random: fix sparse warning (NULL vs 0 for pointer)
  random: use registers from interrupted code for CPU's w/o a cycle counter
  hwrng: add per-device entropy derating
  hwrng: create filler thread
  random: add_hwgenerator_randomness() for feeding entropy from devices
  random: use an improved fast_mix() function
  random: clean up interrupt entropy accounting for archs w/o cycle counters
  random: only update the last_pulled time if we actually transferred entropy
  random: remove unneeded hash of a portion of the entropy pool
  random: always update the entropy pool under the spinlock
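[Editor's note] getrandom(2) fills a userspace buffer from the same pools that back /dev/urandom (or, with GRND_RANDOM, /dev/random) without needing an open file descriptor, and with flags == 0 it blocks until the nonblocking pool is initialized instead of returning weak early-boot output. A minimal userspace sketch follows; it is illustrative only, not part of this merge, and assumes installed kernel headers that define __NR_getrandom, since no libc wrapper existed at the time:

/*
 * Illustrative sketch only: invoke the new syscall directly.
 * Assumes <sys/syscall.h> exposes __NR_getrandom; the GRND_* values
 * below mirror the ones this series adds to include/uapi/linux/random.h.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define GRND_NONBLOCK	0x0001	/* return -EAGAIN instead of blocking */
#define GRND_RANDOM	0x0002	/* draw from the blocking pool */

int main(void)
{
	unsigned char key[32];
	long n = syscall(__NR_getrandom, key, sizeof(key), 0);

	if (n < 0) {
		perror("getrandom");
		return 1;
	}
	/* Unlike open()+read() of /dev/urandom, this needs no fd and
	 * cannot silently return uninitialized early-boot entropy. */
	printf("got %ld random bytes\n", n);
	return 0;
}

Per the SYSCALL_DEFINE3 body in the diff below, GRND_RANDOM routes through _random_read() and so may return short reads or -EAGAIN when the blocking pool runs dry, while the default path waits on urandom_init_wait before reading.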
Diffstat (limited to 'drivers/char')
-rw-r--r--	drivers/char/hw_random/core.c	 67
-rw-r--r--	drivers/char/random.c	315
2 files changed, 252 insertions(+), 130 deletions(-)
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index c4419ea1ab07..6e02ec103cc7 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -38,6 +38,7 @@
 #include <linux/fs.h>
 #include <linux/sched.h>
 #include <linux/miscdevice.h>
+#include <linux/kthread.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/random.h>
@@ -50,10 +51,22 @@
 
 
 static struct hwrng *current_rng;
+static struct task_struct *hwrng_fill;
 static LIST_HEAD(rng_list);
 static DEFINE_MUTEX(rng_mutex);
 static int data_avail;
-static u8 *rng_buffer;
+static u8 *rng_buffer, *rng_fillbuf;
+static unsigned short current_quality;
+static unsigned short default_quality; /* = 0; default to "off" */
+
+module_param(current_quality, ushort, 0644);
+MODULE_PARM_DESC(current_quality,
+		 "current hwrng entropy estimation per mill");
+module_param(default_quality, ushort, 0644);
+MODULE_PARM_DESC(default_quality,
+		 "default entropy content of hwrng per mill");
+
+static void start_khwrngd(void);
 
 static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
 			       int wait);
@@ -89,6 +102,15 @@ static inline int hwrng_init(struct hwrng *rng)
 			return ret;
 	}
 	add_early_randomness(rng);
+
+	current_quality = rng->quality ? : default_quality;
+	current_quality &= 1023;
+
+	if (current_quality == 0 && hwrng_fill)
+		kthread_stop(hwrng_fill);
+	if (current_quality > 0 && !hwrng_fill)
+		start_khwrngd();
+
 	return 0;
 }
 
@@ -325,6 +347,36 @@ err_misc_dereg:
 	goto out;
 }
 
+static int hwrng_fillfn(void *unused)
+{
+	long rc;
+
+	while (!kthread_should_stop()) {
+		if (!current_rng)
+			break;
+		rc = rng_get_data(current_rng, rng_fillbuf,
+				  rng_buffer_size(), 1);
+		if (rc <= 0) {
+			pr_warn("hwrng: no data available\n");
+			msleep_interruptible(10000);
+			continue;
+		}
+		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
+					   rc * current_quality * 8 >> 10);
+	}
+	hwrng_fill = NULL;
+	return 0;
+}
+
+static void start_khwrngd(void)
+{
+	hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
+	if (hwrng_fill == ERR_PTR(-ENOMEM)) {
+		pr_err("hwrng_fill thread creation failed");
+		hwrng_fill = NULL;
+	}
+}
+
 int hwrng_register(struct hwrng *rng)
 {
 	int err = -EINVAL;
@@ -343,6 +395,13 @@ int hwrng_register(struct hwrng *rng)
 		if (!rng_buffer)
 			goto out_unlock;
 	}
+	if (!rng_fillbuf) {
+		rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
+		if (!rng_fillbuf) {
+			kfree(rng_buffer);
+			goto out_unlock;
+		}
+	}
 
 	/* Must not register two RNGs with the same name. */
 	err = -EEXIST;
@@ -406,8 +465,11 @@ void hwrng_unregister(struct hwrng *rng)
 			current_rng = NULL;
 		}
 	}
-	if (list_empty(&rng_list))
+	if (list_empty(&rng_list)) {
 		unregister_miscdev();
+		if (hwrng_fill)
+			kthread_stop(hwrng_fill);
+	}
 
 	mutex_unlock(&rng_mutex);
 }
@@ -418,6 +480,7 @@ static void __exit hwrng_exit(void)
 	mutex_lock(&rng_mutex);
 	BUG_ON(current_rng);
 	kfree(rng_buffer);
+	kfree(rng_fillbuf);
 	mutex_unlock(&rng_mutex);
 }
 
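[Editor's note] The entropy credit in hwrng_fillfn() above is derated by the per-mill quality knob: quality is masked to 0..1023 in hwrng_init(), so a read of rc bytes is credited rc * 8 * quality / 1024 bits, which is exactly the `rc * current_quality * 8 >> 10` expression in the new thread. A standalone sketch of that arithmetic, illustrative and not from the patch:

/* Illustrative userspace check of the derating math used by hwrng_fillfn(). */
#include <stdio.h>

static unsigned long credited_bits(unsigned long bytes, unsigned short quality)
{
	quality &= 1023;			/* same clamp as hwrng_init() */
	return bytes * quality * 8 >> 10;	/* bits_read * quality / 1024 */
}

int main(void)
{
	/* e.g. a 32-byte (256-bit) read at quality 700 credits 175 bits */
	printf("%lu\n", credited_bits(32, 700));
	return 0;
}

At quality 0 the filler thread is never started, so a device with unknown entropy content keeps contributing only through add_early_randomness() and explicit reads of /dev/hwrng.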
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 71529e196b84..c18d41db83d8 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -250,6 +250,7 @@
 #include <linux/interrupt.h>
 #include <linux/mm.h>
 #include <linux/spinlock.h>
+#include <linux/kthread.h>
 #include <linux/percpu.h>
 #include <linux/cryptohash.h>
 #include <linux/fips.h>
@@ -257,6 +258,8 @@
 #include <linux/kmemcheck.h>
 #include <linux/workqueue.h>
 #include <linux/irq.h>
+#include <linux/syscalls.h>
+#include <linux/completion.h>
 
 #include <asm/processor.h>
 #include <asm/uaccess.h>
@@ -267,6 +270,8 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/random.h>
 
+/* #define ADD_INTERRUPT_BENCH */
+
 /*
  * Configuration information
  */
@@ -401,6 +406,7 @@ static struct poolinfo {
  */
 static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
 static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
+static DECLARE_WAIT_QUEUE_HEAD(urandom_init_wait);
 static struct fasync_struct *fasync;
 
 /**********************************************************************
@@ -481,9 +487,9 @@ static __u32 const twist_table[8] = {
  * the entropy is concentrated in the low-order bits.
  */
 static void _mix_pool_bytes(struct entropy_store *r, const void *in,
-			    int nbytes, __u8 out[64])
+			    int nbytes)
 {
-	unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
+	unsigned long i, tap1, tap2, tap3, tap4, tap5;
 	int input_rotate;
 	int wordmask = r->poolinfo->poolwords - 1;
 	const char *bytes = in;
@@ -495,9 +501,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
 	tap4 = r->poolinfo->tap4;
 	tap5 = r->poolinfo->tap5;
 
-	smp_rmb();
-	input_rotate = ACCESS_ONCE(r->input_rotate);
-	i = ACCESS_ONCE(r->add_ptr);
+	input_rotate = r->input_rotate;
+	i = r->add_ptr;
 
 	/* mix one byte at a time to simplify size handling and churn faster */
 	while (nbytes--) {
@@ -524,39 +529,33 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
 		input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
 	}
 
-	ACCESS_ONCE(r->input_rotate) = input_rotate;
-	ACCESS_ONCE(r->add_ptr) = i;
-	smp_wmb();
-
-	if (out)
-		for (j = 0; j < 16; j++)
-			((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
+	r->input_rotate = input_rotate;
+	r->add_ptr = i;
 }
 
 static void __mix_pool_bytes(struct entropy_store *r, const void *in,
-			     int nbytes, __u8 out[64])
+			     int nbytes)
 {
 	trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
-	_mix_pool_bytes(r, in, nbytes, out);
+	_mix_pool_bytes(r, in, nbytes);
 }
 
 static void mix_pool_bytes(struct entropy_store *r, const void *in,
-			   int nbytes, __u8 out[64])
+			   int nbytes)
 {
 	unsigned long flags;
 
 	trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
 	spin_lock_irqsave(&r->lock, flags);
-	_mix_pool_bytes(r, in, nbytes, out);
+	_mix_pool_bytes(r, in, nbytes);
 	spin_unlock_irqrestore(&r->lock, flags);
 }
 
 struct fast_pool {
 	__u32		pool[4];
 	unsigned long	last;
-	unsigned short	count;
-	unsigned char	rotate;
-	unsigned char	last_timer_intr;
+	unsigned short	reg_idx;
+	unsigned char	count;
 };
 
 /*
@@ -564,25 +563,29 @@ struct fast_pool {
  * collector.  It's hardcoded for an 128 bit pool and assumes that any
  * locks that might be needed are taken by the caller.
  */
-static void fast_mix(struct fast_pool *f, __u32 input[4])
+static void fast_mix(struct fast_pool *f)
 {
-	__u32 w;
-	unsigned input_rotate = f->rotate;
+	__u32 a = f->pool[0],	b = f->pool[1];
+	__u32 c = f->pool[2],	d = f->pool[3];
 
-	w = rol32(input[0], input_rotate) ^ f->pool[0] ^ f->pool[3];
-	f->pool[0] = (w >> 3) ^ twist_table[w & 7];
-	input_rotate = (input_rotate + 14) & 31;
-	w = rol32(input[1], input_rotate) ^ f->pool[1] ^ f->pool[0];
-	f->pool[1] = (w >> 3) ^ twist_table[w & 7];
-	input_rotate = (input_rotate + 7) & 31;
-	w = rol32(input[2], input_rotate) ^ f->pool[2] ^ f->pool[1];
-	f->pool[2] = (w >> 3) ^ twist_table[w & 7];
-	input_rotate = (input_rotate + 7) & 31;
-	w = rol32(input[3], input_rotate) ^ f->pool[3] ^ f->pool[2];
-	f->pool[3] = (w >> 3) ^ twist_table[w & 7];
-	input_rotate = (input_rotate + 7) & 31;
-
-	f->rotate = input_rotate;
+	a += b;			c += d;
+	b = rol32(a, 6);	d = rol32(c, 27);
+	d ^= a;			b ^= c;
+
+	a += b;			c += d;
+	b = rol32(a, 16);	d = rol32(c, 14);
+	d ^= a;			b ^= c;
+
+	a += b;			c += d;
+	b = rol32(a, 6);	d = rol32(c, 27);
+	d ^= a;			b ^= c;
+
+	a += b;			c += d;
+	b = rol32(a, 16);	d = rol32(c, 14);
+	d ^= a;			b ^= c;
+
+	f->pool[0] = a;  f->pool[1] = b;
+	f->pool[2] = c;  f->pool[3] = d;
 	f->count++;
 }
 
@@ -657,6 +660,7 @@ retry:
 		r->entropy_total = 0;
 		if (r == &nonblocking_pool) {
 			prandom_reseed_late();
+			wake_up_interruptible(&urandom_init_wait);
 			pr_notice("random: %s pool is initialized\n", r->name);
 		}
 	}
@@ -739,13 +743,13 @@ void add_device_randomness(const void *buf, unsigned int size)
 
 	trace_add_device_randomness(size, _RET_IP_);
 	spin_lock_irqsave(&input_pool.lock, flags);
-	_mix_pool_bytes(&input_pool, buf, size, NULL);
-	_mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
+	_mix_pool_bytes(&input_pool, buf, size);
+	_mix_pool_bytes(&input_pool, &time, sizeof(time));
 	spin_unlock_irqrestore(&input_pool.lock, flags);
 
 	spin_lock_irqsave(&nonblocking_pool.lock, flags);
-	_mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
-	_mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
+	_mix_pool_bytes(&nonblocking_pool, buf, size);
+	_mix_pool_bytes(&nonblocking_pool, &time, sizeof(time));
 	spin_unlock_irqrestore(&nonblocking_pool.lock, flags);
 }
 EXPORT_SYMBOL(add_device_randomness);
@@ -778,7 +782,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
 	sample.cycles = random_get_entropy();
 	sample.num = num;
 	r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
-	mix_pool_bytes(r, &sample, sizeof(sample), NULL);
+	mix_pool_bytes(r, &sample, sizeof(sample));
 
 	/*
 	 * Calculate number of bits of randomness we probably added.
@@ -835,6 +839,38 @@ EXPORT_SYMBOL_GPL(add_input_randomness);
 
 static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
 
+#ifdef ADD_INTERRUPT_BENCH
+static unsigned long avg_cycles, avg_deviation;
+
+#define AVG_SHIFT 8     /* Exponential average factor k=1/256 */
+#define FIXED_1_2 (1 << (AVG_SHIFT-1))
+
+static void add_interrupt_bench(cycles_t start)
+{
+	long delta = random_get_entropy() - start;
+
+	/* Use a weighted moving average */
+	delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
+	avg_cycles += delta;
+	/* And average deviation */
+	delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
+	avg_deviation += delta;
+}
+#else
+#define add_interrupt_bench(x)
+#endif
+
+static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
+{
+	__u32 *ptr = (__u32 *) regs;
+
+	if (regs == NULL)
+		return 0;
+	if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32))
+		f->reg_idx = 0;
+	return *(ptr + f->reg_idx++);
+}
+
 void add_interrupt_randomness(int irq, int irq_flags)
 {
 	struct entropy_store	*r;
@@ -842,55 +878,52 @@ void add_interrupt_randomness(int irq, int irq_flags)
 	struct pt_regs		*regs = get_irq_regs();
 	unsigned long		now = jiffies;
 	cycles_t		cycles = random_get_entropy();
-	__u32			input[4], c_high, j_high;
+	__u32			c_high, j_high;
 	__u64			ip;
 	unsigned long		seed;
-	int			credit;
+	int			credit = 0;
 
+	if (cycles == 0)
+		cycles = get_reg(fast_pool, regs);
 	c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
 	j_high = (sizeof(now) > 4) ? now >> 32 : 0;
-	input[0] = cycles ^ j_high ^ irq;
-	input[1] = now ^ c_high;
+	fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
+	fast_pool->pool[1] ^= now ^ c_high;
 	ip = regs ? instruction_pointer(regs) : _RET_IP_;
-	input[2] = ip;
-	input[3] = ip >> 32;
+	fast_pool->pool[2] ^= ip;
+	fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
+		get_reg(fast_pool, regs);
 
-	fast_mix(fast_pool, input);
+	fast_mix(fast_pool);
+	add_interrupt_bench(cycles);
 
-	if ((fast_pool->count & 63) && !time_after(now, fast_pool->last + HZ))
+	if ((fast_pool->count < 64) &&
+	    !time_after(now, fast_pool->last + HZ))
 		return;
 
-	fast_pool->last = now;
-
 	r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
-	__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
+	if (!spin_trylock(&r->lock))
+		return;
 
-	/*
-	 * If we don't have a valid cycle counter, and we see
-	 * back-to-back timer interrupts, then skip giving credit for
-	 * any entropy, otherwise credit 1 bit.
-	 */
-	credit = 1;
-	if (cycles == 0) {
-		if (irq_flags & __IRQF_TIMER) {
-			if (fast_pool->last_timer_intr)
-				credit = 0;
-			fast_pool->last_timer_intr = 1;
-		} else
-			fast_pool->last_timer_intr = 0;
-	}
+	fast_pool->last = now;
+	__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
 
 	/*
 	 * If we have architectural seed generator, produce a seed and
-	 * add it to the pool.  For the sake of paranoia count it as
-	 * 50% entropic.
+	 * add it to the pool.  For the sake of paranoia don't let the
+	 * architectural seed generator dominate the input from the
+	 * interrupt noise.
 	 */
 	if (arch_get_random_seed_long(&seed)) {
-		__mix_pool_bytes(r, &seed, sizeof(seed), NULL);
-		credit += sizeof(seed) * 4;
+		__mix_pool_bytes(r, &seed, sizeof(seed));
+		credit = 1;
 	}
+	spin_unlock(&r->lock);
 
-	credit_entropy_bits(r, credit);
+	fast_pool->count = 0;
+
+	/* award one bit for the contents of the fast pool */
+	credit_entropy_bits(r, credit + 1);
 }
 
 #ifdef CONFIG_BLOCK
@@ -922,6 +955,11 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
 static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes);
 static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
 {
+	if (!r->pull ||
+	    r->entropy_count >= (nbytes << (ENTROPY_SHIFT + 3)) ||
+	    r->entropy_count > r->poolinfo->poolfracbits)
+		return;
+
 	if (r->limit == 0 && random_min_urandom_seed) {
 		unsigned long now = jiffies;
 
@@ -930,10 +968,8 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
 			return;
 		r->last_pulled = now;
 	}
-	if (r->pull &&
-	    r->entropy_count < (nbytes << (ENTROPY_SHIFT + 3)) &&
-	    r->entropy_count < r->poolinfo->poolfracbits)
-		_xfer_secondary_pool(r, nbytes);
+
+	_xfer_secondary_pool(r, nbytes);
 }
 
 static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
@@ -953,7 +989,7 @@ static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
 				  ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
 	bytes = extract_entropy(r->pull, tmp, bytes,
 				random_read_wakeup_bits / 8, rsvd_bytes);
-	mix_pool_bytes(r, tmp, bytes, NULL);
+	mix_pool_bytes(r, tmp, bytes);
 	credit_entropy_bits(r, bytes*8);
 }
 
@@ -1039,7 +1075,6 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
 		unsigned long l[LONGS(20)];
 	} hash;
 	__u32 workspace[SHA_WORKSPACE_WORDS];
-	__u8 extract[64];
 	unsigned long flags;
 
 	/*
@@ -1068,15 +1103,9 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
 	 * brute-forcing the feedback as hard as brute-forcing the
 	 * hash.
 	 */
-	__mix_pool_bytes(r, hash.w, sizeof(hash.w), extract);
+	__mix_pool_bytes(r, hash.w, sizeof(hash.w));
 	spin_unlock_irqrestore(&r->lock, flags);
 
-	/*
-	 * To avoid duplicates, we atomically extract a portion of the
-	 * pool while mixing, and hash one final time.
-	 */
-	sha_transform(hash.w, extract, workspace);
-	memset(extract, 0, sizeof(extract));
 	memset(workspace, 0, sizeof(workspace));
 
 	/*
@@ -1160,13 +1189,14 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
 {
 	ssize_t ret = 0, i;
 	__u8 tmp[EXTRACT_SIZE];
+	int large_request = (nbytes > 256);
 
 	trace_extract_entropy_user(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
 	xfer_secondary_pool(r, nbytes);
 	nbytes = account(r, nbytes, 0, 0);
 
 	while (nbytes) {
-		if (need_resched()) {
+		if (large_request && need_resched()) {
 			if (signal_pending(current)) {
 				if (ret == 0)
 					ret = -ERESTARTSYS;
@@ -1263,14 +1293,14 @@ static void init_std_data(struct entropy_store *r)
 	unsigned long rv;
 
 	r->last_pulled = jiffies;
-	mix_pool_bytes(r, &now, sizeof(now), NULL);
+	mix_pool_bytes(r, &now, sizeof(now));
 	for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
 		if (!arch_get_random_seed_long(&rv) &&
 		    !arch_get_random_long(&rv))
 			rv = random_get_entropy();
-		mix_pool_bytes(r, &rv, sizeof(rv), NULL);
+		mix_pool_bytes(r, &rv, sizeof(rv));
 	}
-	mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
+	mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
 }
 
 /*
@@ -1309,39 +1339,8 @@ void rand_initialize_disk(struct gendisk *disk)
 }
 #endif
 
-/*
- * Attempt an emergency refill using arch_get_random_seed_long().
- *
- * As with add_interrupt_randomness() be paranoid and only
- * credit the output as 50% entropic.
- */
-static int arch_random_refill(void)
-{
-	const unsigned int nlongs = 64;	/* Arbitrary number */
-	unsigned int n = 0;
-	unsigned int i;
-	unsigned long buf[nlongs];
-
-	if (!arch_has_random_seed())
-		return 0;
-
-	for (i = 0; i < nlongs; i++) {
-		if (arch_get_random_seed_long(&buf[n]))
-			n++;
-	}
-
-	if (n) {
-		unsigned int rand_bytes = n * sizeof(unsigned long);
-
-		mix_pool_bytes(&input_pool, buf, rand_bytes, NULL);
-		credit_entropy_bits(&input_pool, rand_bytes*4);
-	}
-
-	return n;
-}
-
 static ssize_t
-random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+_random_read(int nonblock, char __user *buf, size_t nbytes)
 {
 	ssize_t n;
 
@@ -1360,12 +1359,7 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
 		return n;
 
 	/* Pool is (near) empty.  Maybe wait and retry. */
-
-	/* First try an emergency refill */
-	if (arch_random_refill())
-		continue;
-
-	if (file->f_flags & O_NONBLOCK)
+	if (nonblock)
 		return -EAGAIN;
 
 	wait_event_interruptible(random_read_wait,
@@ -1377,6 +1371,12 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
 }
 
 static ssize_t
+random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+{
+	return _random_read(file->f_flags & O_NONBLOCK, buf, nbytes);
+}
+
+static ssize_t
 urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
 {
 	int ret;
@@ -1424,7 +1424,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
 		count -= bytes;
 		p += bytes;
 
-		mix_pool_bytes(r, buf, bytes, NULL);
+		mix_pool_bytes(r, buf, bytes);
 		cond_resched();
 	}
 
@@ -1520,6 +1520,29 @@ const struct file_operations urandom_fops = {
 	.llseek = noop_llseek,
 };
 
+SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
+		unsigned int, flags)
+{
+	if (flags & ~(GRND_NONBLOCK|GRND_RANDOM))
+		return -EINVAL;
+
+	if (count > INT_MAX)
+		count = INT_MAX;
+
+	if (flags & GRND_RANDOM)
+		return _random_read(flags & GRND_NONBLOCK, buf, count);
+
+	if (unlikely(nonblocking_pool.initialized == 0)) {
+		if (flags & GRND_NONBLOCK)
+			return -EAGAIN;
+		wait_event_interruptible(urandom_init_wait,
+					 nonblocking_pool.initialized);
+		if (signal_pending(current))
+			return -ERESTARTSYS;
+	}
+	return urandom_read(NULL, buf, count, NULL);
+}
+
 /***************************************************************
  * Random UUID interface
  *
@@ -1663,6 +1686,22 @@ struct ctl_table random_table[] = {
 		.mode		= 0444,
 		.proc_handler	= proc_do_uuid,
 	},
+#ifdef ADD_INTERRUPT_BENCH
+	{
+		.procname	= "add_interrupt_avg_cycles",
+		.data		= &avg_cycles,
+		.maxlen		= sizeof(avg_cycles),
+		.mode		= 0444,
+		.proc_handler	= proc_doulongvec_minmax,
+	},
+	{
+		.procname	= "add_interrupt_avg_deviation",
+		.data		= &avg_deviation,
+		.maxlen		= sizeof(avg_deviation),
+		.mode		= 0444,
+		.proc_handler	= proc_doulongvec_minmax,
+	},
+#endif
 	{ }
 };
 #endif /* CONFIG_SYSCTL */
@@ -1719,3 +1758,23 @@ randomize_range(unsigned long start, unsigned long end, unsigned long len)
 		return 0;
 	return PAGE_ALIGN(get_random_int() % range + start);
 }
+
+/* Interface for in-kernel drivers of true hardware RNGs.
+ * Those devices may produce endless random bits and will be throttled
+ * when our pool is full.
+ */
+void add_hwgenerator_randomness(const char *buffer, size_t count,
+				size_t entropy)
+{
+	struct entropy_store *poolp = &input_pool;
+
+	/* Suspend writing if we're above the trickle threshold.
+	 * We'll be woken up again once below random_write_wakeup_thresh,
+	 * or when the calling thread is about to terminate.
+	 */
+	wait_event_interruptible(random_write_wait, kthread_should_stop() ||
+			ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
+	mix_pool_bytes(poolp, buffer, count);
+	credit_entropy_bits(poolp, entropy);
+}
+EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
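
[Editor's note] Taken together with add_hwgenerator_randomness() above, a hardware RNG driver only has to register a read callback and declare a nonzero per-mill quality for khwrngd to trickle its output into the input pool. A hypothetical minimal driver follows, sketched purely for illustration against the APIs in this merge; demo_rng_read, the device name, and the quality value are invented:

/*
 * Hedged sketch of a hw_random client, not part of this merge.
 * Assumes the struct hwrng .read callback and the new .quality field
 * introduced by "hwrng: add per-device entropy derating".
 */
#include <linux/hw_random.h>
#include <linux/module.h>
#include <linux/string.h>

/* Hypothetical hardware read; a real driver would talk to its device. */
static int demo_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	memset(buf, 0xa5, max);	/* stand-in for real hardware bytes */
	return max;		/* number of bytes produced */
}

static struct hwrng demo_rng = {
	.name    = "demo",
	.read    = demo_rng_read,
	.quality = 512,	/* claim 512/1024, i.e. ~50% entropy per output bit */
};

static int __init demo_rng_init(void)
{
	/* Once this device becomes current_rng, the nonzero quality
	 * makes hwrng_init() start the khwrngd filler thread. */
	return hwrng_register(&demo_rng);
}

static void __exit demo_rng_exit(void)
{
	hwrng_unregister(&demo_rng);
}

module_init(demo_rng_init);
module_exit(demo_rng_exit);
MODULE_LICENSE("GPL");

Note how the derating closes the loop: with quality 512, each buffer khwrngd reads is credited at half its bit count, matching the series' goal of limiting the hw rng's contribution to at most half the pool.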