Diffstat (limited to 'drivers/char/random.c')
 drivers/char/random.c | 355 ++++++++++++++++++++++++++++++++-----------------
 1 file changed, 230 insertions(+), 125 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 4ec04a754733..b86eae9b77df 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -125,21 +125,26 @@
  * The current exported interfaces for gathering environmental noise
  * from the devices are:
  *
+ *      void add_device_randomness(const void *buf, unsigned int size);
  *      void add_input_randomness(unsigned int type, unsigned int code,
  *                                unsigned int value);
- *      void add_interrupt_randomness(int irq);
+ *      void add_interrupt_randomness(int irq, int irq_flags);
  *      void add_disk_randomness(struct gendisk *disk);
  *
+ * add_device_randomness() is for adding data to the random pool that
+ * is likely to differ between two devices (or possibly even per boot).
+ * This would be things like MAC addresses or serial numbers, or the
+ * read-out of the RTC.  This does *not* add any actual entropy to the
+ * pool, but it initializes the pool to different values for devices
+ * that might otherwise be identical and have very little entropy
+ * available to them (particularly common in the embedded world).
+ *
  * add_input_randomness() uses the input layer interrupt timing, as well as
  * the event type information from the hardware.
  *
- * add_interrupt_randomness() uses the inter-interrupt timing as random
- * inputs to the entropy pool.  Note that not all interrupts are good
- * sources of randomness!  For example, the timer interrupts is not a
- * good choice, because the periodicity of the interrupts is too
- * regular, and hence predictable to an attacker.  Network Interface
- * Controller interrupts are a better measure, since the timing of the
- * NIC interrupts are more unpredictable.
+ * add_interrupt_randomness() uses the interrupt timing as random
+ * inputs to the entropy pool.  Using the cycle counters and the irq source
+ * as inputs, it feeds the randomness roughly once a second.
  *
  * add_disk_randomness() uses what amounts to the seek time of block
  * layer request events, on a per-disk_devt basis, as input to the
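The interface comment above summarizes the series: one new input, add_device_randomness(), and a reworked signature for add_interrupt_randomness(). As a rough usage sketch (not part of this patch; the device structure and probe function are invented), a driver would feed device-unique bytes in once, at probe time:

#include <linux/random.h>
#include <linux/etherdevice.h>

struct example_nic {                    /* invented, for illustration only */
        u8 mac_addr[ETH_ALEN];
};

/* Hypothetical probe path: seed the pools with this NIC's MAC address.
 * No entropy is credited; it just keeps otherwise-identical boxes from
 * starting with identical pool state. */
static int example_nic_probe(struct example_nic *nic)
{
        add_device_randomness(nic->mac_addr, ETH_ALEN);
        return 0;
}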
@@ -248,6 +253,8 @@
 #include <linux/percpu.h>
 #include <linux/cryptohash.h>
 #include <linux/fips.h>
+#include <linux/ptrace.h>
+#include <linux/kmemcheck.h>
 
 #ifdef CONFIG_GENERIC_HARDIRQS
 # include <linux/irq.h>
@@ -256,8 +263,12 @@
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/irq.h>
+#include <asm/irq_regs.h>
 #include <asm/io.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/random.h>
+
 /*
  * Configuration information
  */
@@ -266,6 +277,8 @@
 #define SEC_XFER_SIZE 512
 #define EXTRACT_SIZE 10
 
+#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
+
 /*
  * The minimum number of bits of entropy before we wake up a read on
  * /dev/random.  Should be enough to do a significant reseed.
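LONGS() is a round-up division from bytes to unsigned longs; it is used below to size the array that overlays the extraction hash. A quick userspace check of the arithmetic (a sketch; the values in the comments assume LP64 vs. ILP32):

#include <stdio.h>

#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))

int main(void)
{
        /* EXTRACT_SIZE is 10 bytes: 2 longs on a 64-bit box, 3 on 32-bit. */
        printf("LONGS(10) = %zu\n", LONGS(10));
        printf("LONGS(16) = %zu\n", LONGS(16));
        return 0;
}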
@@ -420,8 +433,10 @@ struct entropy_store {
         /* read-write data: */
         spinlock_t lock;
         unsigned add_ptr;
+        unsigned input_rotate;
         int entropy_count;
-        int input_rotate;
+        int entropy_total;
+        unsigned int initialized:1;
         __u8 last_data[EXTRACT_SIZE];
 };
 
@@ -454,6 +469,10 @@ static struct entropy_store nonblocking_pool = {
         .pool = nonblocking_pool_data
 };
 
+static __u32 const twist_table[8] = {
+        0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
+        0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
+
 /*
  * This function adds bytes into the entropy "pool".  It does not
  * update the entropy estimate.  The caller should call
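Moving twist_table to file scope lets the new fast_mix() below share it with _mix_pool_bytes(). As an observation (the patch itself does not state this), the table is GF(2)-linear in its index, generated from the bit-reversed CRC-32 polynomial 0xedb88320 and its right shifts, which a few lines of C can confirm:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        static const uint32_t twist_table[8] = {
                0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
                0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
        const uint32_t poly = 0xedb88320;  /* reversed CRC-32 polynomial */
        int i;

        /* Entry i is the XOR of poly>>2, poly>>1, poly, selected by i's bits. */
        for (i = 0; i < 8; i++)
                assert(twist_table[i] ==
                       ((i & 1 ? poly >> 2 : 0) ^
                        (i & 2 ? poly >> 1 : 0) ^
                        (i & 4 ? poly : 0)));
        return 0;
}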
@@ -464,29 +483,24 @@ static struct entropy_store nonblocking_pool = {
  * it's cheap to do so and helps slightly in the expected case where
  * the entropy is concentrated in the low-order bits.
  */
-static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
-                                   int nbytes, __u8 out[64])
+static void _mix_pool_bytes(struct entropy_store *r, const void *in,
+                            int nbytes, __u8 out[64])
 {
-        static __u32 const twist_table[8] = {
-                0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
-                0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
         unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
         int input_rotate;
         int wordmask = r->poolinfo->poolwords - 1;
         const char *bytes = in;
         __u32 w;
-        unsigned long flags;
 
-        /* Taps are constant, so we can load them without holding r->lock. */
         tap1 = r->poolinfo->tap1;
         tap2 = r->poolinfo->tap2;
         tap3 = r->poolinfo->tap3;
         tap4 = r->poolinfo->tap4;
         tap5 = r->poolinfo->tap5;
 
-        spin_lock_irqsave(&r->lock, flags);
-        input_rotate = r->input_rotate;
-        i = r->add_ptr;
+        smp_rmb();
+        input_rotate = ACCESS_ONCE(r->input_rotate);
+        i = ACCESS_ONCE(r->add_ptr);
 
         /* mix one byte at a time to simplify size handling and churn faster */
         while (nbytes--) {
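The lock is gone from the mix itself: ACCESS_ONCE() pins each read and write of input_rotate and add_ptr to exactly one memory access, and the smp_rmb()/smp_wmb() pair orders them against the pool contents. In the kernel, ACCESS_ONCE() is a volatile cast; a minimal userspace rendering of the idiom (a sketch, not the kernel header):

#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

static unsigned add_ptr;        /* stand-in for r->add_ptr */

int main(void)
{
        unsigned i = ACCESS_ONCE(add_ptr);      /* exactly one load */
        ACCESS_ONCE(add_ptr) = i + 1;           /* exactly one store */
        return 0;
}

The idea being that a lost race here at worst drops or double-mixes a little input, which only costs mixing quality; it cannot corrupt the pool.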
@@ -513,19 +527,61 @@ static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
                 input_rotate += i ? 7 : 14;
         }
 
-        r->input_rotate = input_rotate;
-        r->add_ptr = i;
+        ACCESS_ONCE(r->input_rotate) = input_rotate;
+        ACCESS_ONCE(r->add_ptr) = i;
+        smp_wmb();
 
         if (out)
                 for (j = 0; j < 16; j++)
                         ((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
+}
+
+static void __mix_pool_bytes(struct entropy_store *r, const void *in,
+                             int nbytes, __u8 out[64])
+{
+        trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
+        _mix_pool_bytes(r, in, nbytes, out);
+}
+
+static void mix_pool_bytes(struct entropy_store *r, const void *in,
+                           int nbytes, __u8 out[64])
+{
+        unsigned long flags;
 
+        trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
+        spin_lock_irqsave(&r->lock, flags);
+        _mix_pool_bytes(r, in, nbytes, out);
         spin_unlock_irqrestore(&r->lock, flags);
 }
 
-static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
+struct fast_pool {
+        __u32           pool[4];
+        unsigned long   last;
+        unsigned short  count;
+        unsigned char   rotate;
+        unsigned char   last_timer_intr;
+};
+
+/*
+ * This is a fast mixing routine used by the interrupt randomness
+ * collector.  It's hardcoded for an 128 bit pool and assumes that any
+ * locks that might be needed are taken by the caller.
+ */
+static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
 {
-        mix_pool_bytes_extract(r, in, bytes, NULL);
+        const char      *bytes = in;
+        __u32           w;
+        unsigned        i = f->count;
+        unsigned        input_rotate = f->rotate;
+
+        while (nbytes--) {
+                w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
+                        f->pool[(i + 1) & 3];
+                f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
+                input_rotate += (i++ & 3) ? 7 : 14;
+        }
+        f->count = i;
+        f->rotate = input_rotate;
 }
 
 /*
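fast_mix() is small enough to exercise directly in user space. A standalone harness (a sketch: kernel types replaced with <stdint.h> ones, and rol32() guarded against a zero shift, which the caller's "& 31" otherwise permits):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const uint32_t twist_table[8] = {
        0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
        0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };

static uint32_t rol32(uint32_t word, unsigned shift)
{
        return shift ? (word << shift) | (word >> (32 - shift)) : word;
}

struct fast_pool {
        uint32_t pool[4];
        unsigned short count;
        unsigned char rotate;
};

/* Transcription of fast_mix() from the patch. */
static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
{
        const unsigned char *bytes = in;
        uint32_t w;
        unsigned i = f->count;
        unsigned input_rotate = f->rotate;

        while (nbytes--) {
                w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
                        f->pool[(i + 1) & 3];
                f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
                input_rotate += (i++ & 3) ? 7 : 14;
        }
        f->count = i;
        f->rotate = input_rotate;
}

int main(void)
{
        struct fast_pool f;
        uint32_t sample[4] = { 0xdeadbeef, 42, 0x12345678, 0 };

        memset(&f, 0, sizeof(f));
        fast_mix(&f, sample, sizeof(sample));
        printf("%08x %08x %08x %08x\n", (unsigned)f.pool[0],
               (unsigned)f.pool[1], (unsigned)f.pool[2], (unsigned)f.pool[3]);
        return 0;
}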
@@ -533,30 +589,38 @@ static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
  */
 static void credit_entropy_bits(struct entropy_store *r, int nbits)
 {
-        unsigned long flags;
-        int entropy_count;
+        int entropy_count, orig;
 
         if (!nbits)
                 return;
 
-        spin_lock_irqsave(&r->lock, flags);
-
         DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
-        entropy_count = r->entropy_count;
+retry:
+        entropy_count = orig = ACCESS_ONCE(r->entropy_count);
         entropy_count += nbits;
+
         if (entropy_count < 0) {
                 DEBUG_ENT("negative entropy/overflow\n");
                 entropy_count = 0;
         } else if (entropy_count > r->poolinfo->POOLBITS)
                 entropy_count = r->poolinfo->POOLBITS;
-        r->entropy_count = entropy_count;
+        if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+                goto retry;
+
+        if (!r->initialized && nbits > 0) {
+                r->entropy_total += nbits;
+                if (r->entropy_total > 128)
+                        r->initialized = 1;
+        }
+
+        trace_credit_entropy_bits(r->name, nbits, entropy_count,
+                                  r->entropy_total, _RET_IP_);
 
         /* should we wake readers? */
         if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
                 wake_up_interruptible(&random_read_wait);
                 kill_fasync(&fasync, SIGIO, POLL_IN);
         }
-        spin_unlock_irqrestore(&r->lock, flags);
 }
 
 /*********************************************************************
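credit_entropy_bits() now uses the classic lock-free update shape: snapshot the counter, compute the clamped new value, and publish it with cmpxchg(), retrying if another CPU moved the count in between. The same pattern in portable C11 atomics (a sketch; POOLBITS is stood in by a constant):

#include <stdatomic.h>

#define POOLBITS 4096                   /* stand-in for r->poolinfo->POOLBITS */

static _Atomic int entropy_count;

static void credit_entropy_bits(int nbits)
{
        int orig, new;

        if (!nbits)
                return;

        do {
                orig = atomic_load(&entropy_count);
                new = orig + nbits;
                if (new < 0)                    /* negative entropy/overflow */
                        new = 0;
                else if (new > POOLBITS)        /* pool is full */
                        new = POOLBITS;
                /* Publish only if nobody raced us; else recompute. */
        } while (!atomic_compare_exchange_weak(&entropy_count, &orig, new));
}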
@@ -572,42 +636,24 @@ struct timer_rand_state {
         unsigned dont_count_entropy:1;
 };
 
-#ifndef CONFIG_GENERIC_HARDIRQS
-
-static struct timer_rand_state *irq_timer_state[NR_IRQS];
-
-static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
-{
-        return irq_timer_state[irq];
-}
-
-static void set_timer_rand_state(unsigned int irq,
-                                 struct timer_rand_state *state)
-{
-        irq_timer_state[irq] = state;
-}
-
-#else
-
-static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
-{
-        struct irq_desc *desc;
-
-        desc = irq_to_desc(irq);
-
-        return desc->timer_rand_state;
-}
-
-static void set_timer_rand_state(unsigned int irq,
-                                 struct timer_rand_state *state)
-{
-        struct irq_desc *desc;
-
-        desc = irq_to_desc(irq);
-
-        desc->timer_rand_state = state;
-}
-#endif
+/*
+ * Add device- or boot-specific data to the input and nonblocking
+ * pools to help initialize them to unique values.
+ *
+ * None of this adds any entropy, it is meant to avoid the
+ * problem of the nonblocking pool having similar initial state
+ * across largely identical devices.
+ */
+void add_device_randomness(const void *buf, unsigned int size)
+{
+        unsigned long time = get_cycles() ^ jiffies;
+
+        mix_pool_bytes(&input_pool, buf, size, NULL);
+        mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
+        mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
+        mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
+}
+EXPORT_SYMBOL(add_device_randomness);
 
 static struct timer_rand_state input_timer_state;
 
@@ -637,13 +683,9 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
                 goto out;
 
         sample.jiffies = jiffies;
-
-        /* Use arch random value, fall back to cycles */
-        if (!arch_get_random_int(&sample.cycles))
-                sample.cycles = get_cycles();
-
+        sample.cycles = get_cycles();
         sample.num = num;
-        mix_pool_bytes(&input_pool, &sample, sizeof(sample));
+        mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
 
         /*
          * Calculate number of bits of randomness we probably added.
@@ -700,17 +742,48 @@ void add_input_randomness(unsigned int type, unsigned int code,
 }
 EXPORT_SYMBOL_GPL(add_input_randomness);
 
-void add_interrupt_randomness(int irq)
+static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
+
+void add_interrupt_randomness(int irq, int irq_flags)
 {
-        struct timer_rand_state *state;
+        struct entropy_store    *r;
+        struct fast_pool        *fast_pool = &__get_cpu_var(irq_randomness);
+        struct pt_regs          *regs = get_irq_regs();
+        unsigned long           now = jiffies;
+        __u32                   input[4], cycles = get_cycles();
+
+        input[0] = cycles ^ jiffies;
+        input[1] = irq;
+        if (regs) {
+                __u64 ip = instruction_pointer(regs);
+                input[2] = ip;
+                input[3] = ip >> 32;
+        }
 
-        state = get_timer_rand_state(irq);
+        fast_mix(fast_pool, input, sizeof(input));
 
-        if (state == NULL)
+        if ((fast_pool->count & 1023) &&
+            !time_after(now, fast_pool->last + HZ))
                 return;
 
-        DEBUG_ENT("irq event %d\n", irq);
-        add_timer_randomness(state, 0x100 + irq);
+        fast_pool->last = now;
+
+        r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
+        __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
+        /*
+         * If we don't have a valid cycle counter, and we see
+         * back-to-back timer interrupts, then skip giving credit for
+         * any entropy.
+         */
+        if (cycles == 0) {
+                if (irq_flags & __IRQF_TIMER) {
+                        if (fast_pool->last_timer_intr)
+                                return;
+                        fast_pool->last_timer_intr = 1;
+                } else
+                        fast_pool->last_timer_intr = 0;
+        }
+        credit_entropy_bits(r, 1);
 }
 
 #ifdef CONFIG_BLOCK
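The spill policy is worth spelling out: fast_mix() advances count by one per byte, i.e. by 16 per interrupt, so "count & 1023 == 0" fires every 64 interrupts; the fast pool is therefore spilled into a real pool, with one bit credited, every 64 interrupts or once a second, whichever comes first. The condition modelled in plain C (a sketch; HZ is a stand-in value):

#include <stdbool.h>
#include <stdio.h>

#define HZ 100  /* stand-in for the kernel tick rate */

/* time_after(a, b) as in <linux/jiffies.h>: wrap-safe "a is later than b" */
static bool time_after(unsigned long a, unsigned long b)
{
        return (long)(b - a) < 0;
}

static bool should_spill(unsigned short count, unsigned long now,
                         unsigned long last)
{
        /* Inverse of the early return in add_interrupt_randomness(). */
        return !(count & 1023) || time_after(now, last + HZ);
}

int main(void)
{
        printf("%d\n", should_spill(1024, 5, 0));       /* 1: 64th interrupt */
        printf("%d\n", should_spill(16, HZ + 1, 0));    /* 1: a second passed */
        printf("%d\n", should_spill(16, HZ - 1, 0));    /* 0: keep batching */
        return 0;
}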
@@ -742,7 +815,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
  */
 static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
 {
-        __u32 tmp[OUTPUT_POOL_WORDS];
+        __u32   tmp[OUTPUT_POOL_WORDS];
 
         if (r->pull && r->entropy_count < nbytes * 8 &&
             r->entropy_count < r->poolinfo->POOLBITS) {
@@ -761,7 +834,7 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
 
                 bytes = extract_entropy(r->pull, tmp, bytes,
                                         random_read_wakeup_thresh / 8, rsvd);
-                mix_pool_bytes(r, tmp, bytes);
+                mix_pool_bytes(r, tmp, bytes, NULL);
                 credit_entropy_bits(r, bytes*8);
         }
 }
@@ -820,13 +893,19 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
 static void extract_buf(struct entropy_store *r, __u8 *out)
 {
         int i;
-        __u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
+        union {
+                __u32 w[5];
+                unsigned long l[LONGS(EXTRACT_SIZE)];
+        } hash;
+        __u32 workspace[SHA_WORKSPACE_WORDS];
         __u8 extract[64];
+        unsigned long flags;
 
         /* Generate a hash across the pool, 16 words (512 bits) at a time */
-        sha_init(hash);
+        sha_init(hash.w);
+        spin_lock_irqsave(&r->lock, flags);
         for (i = 0; i < r->poolinfo->poolwords; i += 16)
-                sha_transform(hash, (__u8 *)(r->pool + i), workspace);
+                sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
 
         /*
          * We mix the hash back into the pool to prevent backtracking
@@ -837,13 +916,14 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
          * brute-forcing the feedback as hard as brute-forcing the
          * hash.
          */
-        mix_pool_bytes_extract(r, hash, sizeof(hash), extract);
+        __mix_pool_bytes(r, hash.w, sizeof(hash.w), extract);
+        spin_unlock_irqrestore(&r->lock, flags);
 
         /*
          * To avoid duplicates, we atomically extract a portion of the
          * pool while mixing, and hash one final time.
          */
-        sha_transform(hash, extract, workspace);
+        sha_transform(hash.w, extract, workspace);
         memset(extract, 0, sizeof(extract));
         memset(workspace, 0, sizeof(workspace));
 
@@ -852,20 +932,32 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
          * pattern, we fold it in half. Thus, we always feed back
          * twice as much data as we output.
          */
-        hash[0] ^= hash[3];
-        hash[1] ^= hash[4];
-        hash[2] ^= rol32(hash[2], 16);
-        memcpy(out, hash, EXTRACT_SIZE);
-        memset(hash, 0, sizeof(hash));
+        hash.w[0] ^= hash.w[3];
+        hash.w[1] ^= hash.w[4];
+        hash.w[2] ^= rol32(hash.w[2], 16);
+
+        /*
+         * If we have a architectural hardware random number
+         * generator, mix that in, too.
+         */
+        for (i = 0; i < LONGS(EXTRACT_SIZE); i++) {
+                unsigned long v;
+                if (!arch_get_random_long(&v))
+                        break;
+                hash.l[i] ^= v;
+        }
+
+        memcpy(out, &hash, EXTRACT_SIZE);
+        memset(&hash, 0, sizeof(hash));
 }
 
 static ssize_t extract_entropy(struct entropy_store *r, void *buf,
                                 size_t nbytes, int min, int reserved)
 {
         ssize_t ret = 0, i;
         __u8 tmp[EXTRACT_SIZE];
-        unsigned long flags;
 
+        trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
         xfer_secondary_pool(r, nbytes);
         nbytes = account(r, nbytes, min, reserved);
 
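extract_buf() now does three things to the 160-bit SHA-1 result: fold it to 80 bits so the output reveals only half of what was fed back, XOR in arch RNG output if one is present, and emit EXTRACT_SIZE (10) bytes. The fold step in isolation (a sketch; on a little-endian machine the copy takes the low half of hash[2]):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EXTRACT_SIZE 10

static uint32_t rol32(uint32_t word, unsigned shift)
{
        return shift ? (word << shift) | (word >> (32 - shift)) : word;
}

/* Fold a 5-word (160-bit) hash down to EXTRACT_SIZE (80 bits) of output. */
static void fold_hash(uint32_t hash[5], uint8_t out[EXTRACT_SIZE])
{
        hash[0] ^= hash[3];
        hash[1] ^= hash[4];
        hash[2] ^= rol32(hash[2], 16);
        memcpy(out, hash, EXTRACT_SIZE);   /* hash[0..1] plus half of hash[2] */
}

int main(void)
{
        uint32_t h[5] = { 0x01234567, 0x89abcdef, 0x0f1e2d3c,
                          0x4b5a6978, 0x8796a5b4 };
        uint8_t out[EXTRACT_SIZE];
        int i;

        fold_hash(h, out);
        for (i = 0; i < EXTRACT_SIZE; i++)
                printf("%02x", (unsigned)out[i]);
        putchar('\n');
        return 0;
}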
@@ -873,6 +965,8 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
                 extract_buf(r, tmp);
 
                 if (fips_enabled) {
+                        unsigned long flags;
+
                         spin_lock_irqsave(&r->lock, flags);
                         if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
                                 panic("Hardware RNG duplicated output!\n");
@@ -898,6 +992,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
         ssize_t ret = 0, i;
         __u8 tmp[EXTRACT_SIZE];
 
+        trace_extract_entropy_user(r->name, nbytes, r->entropy_count, _RET_IP_);
         xfer_secondary_pool(r, nbytes);
         nbytes = account(r, nbytes, 0, 0);
 
@@ -931,17 +1026,35 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
 
 /*
  * This function is the exported kernel interface.  It returns some
- * number of good random numbers, suitable for seeding TCP sequence
- * numbers, etc.
+ * number of good random numbers, suitable for key generation, seeding
+ * TCP sequence numbers, etc.  It does not use the hw random number
+ * generator, if available; use get_random_bytes_arch() for that.
  */
 void get_random_bytes(void *buf, int nbytes)
 {
+        extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
+}
+EXPORT_SYMBOL(get_random_bytes);
+
+/*
+ * This function will use the architecture-specific hardware random
+ * number generator if it is available.  The arch-specific hw RNG will
+ * almost certainly be faster than what we can do in software, but it
+ * is impossible to verify that it is implemented securely (as
+ * opposed, to, say, the AES encryption of a sequence number using a
+ * key known by the NSA).  So it's useful if we need the speed, but
+ * only if we're willing to trust the hardware manufacturer not to
+ * have put in a back door.
+ */
+void get_random_bytes_arch(void *buf, int nbytes)
+{
         char *p = buf;
 
+        trace_get_random_bytes(nbytes, _RET_IP_);
         while (nbytes) {
                 unsigned long v;
                 int chunk = min(nbytes, (int)sizeof(unsigned long));
 
                 if (!arch_get_random_long(&v))
                         break;
 
@@ -950,9 +1063,11 @@ void get_random_bytes(void *buf, int nbytes)
                 nbytes -= chunk;
         }
 
-        extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
+        if (nbytes)
+                extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
 }
-EXPORT_SYMBOL(get_random_bytes);
+EXPORT_SYMBOL(get_random_bytes_arch);
+
 
 /*
  * init_std_data - initialize pool with system data
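get_random_bytes_arch() takes whole longs from the hardware RNG for as long as it delivers, then falls through to the software pool for whatever is left; get_random_bytes() itself no longer touches the hardware at all. The fallback shape, modelled in user space (a sketch; both helpers here are invented stubs):

#include <string.h>

/* Stub for arch_get_random_long(): pretend no RDRAND-style instruction. */
static int arch_get_random_long(unsigned long *v)
{
        (void)v;
        return 0;
}

/* Stub for extract_entropy(&nonblocking_pool, ...). */
static void pool_extract(void *buf, int nbytes)
{
        memset(buf, 0, nbytes);
}

void get_random_bytes_arch(void *buf, int nbytes)
{
        char *p = buf;

        while (nbytes) {
                unsigned long v;
                int chunk = nbytes < (int)sizeof(v) ? nbytes : (int)sizeof(v);

                if (!arch_get_random_long(&v))
                        break;          /* hardware dried up: fall back */
                memcpy(p, &v, chunk);
                p += chunk;
                nbytes -= chunk;
        }

        if (nbytes)
                pool_extract(p, nbytes);  /* software pool covers the rest */
}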
@@ -966,23 +1081,30 @@ EXPORT_SYMBOL(get_random_bytes);
 static void init_std_data(struct entropy_store *r)
 {
         int i;
-        ktime_t now;
-        unsigned long flags;
+        ktime_t now = ktime_get_real();
+        unsigned long rv;
 
-        spin_lock_irqsave(&r->lock, flags);
         r->entropy_count = 0;
-        spin_unlock_irqrestore(&r->lock, flags);
-
-        now = ktime_get_real();
-        mix_pool_bytes(r, &now, sizeof(now));
-        for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof flags) {
-                if (!arch_get_random_long(&flags))
+        r->entropy_total = 0;
+        mix_pool_bytes(r, &now, sizeof(now), NULL);
+        for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
+                if (!arch_get_random_long(&rv))
                         break;
-                mix_pool_bytes(r, &flags, sizeof(flags));
+                mix_pool_bytes(r, &rv, sizeof(rv), NULL);
         }
-        mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
+        mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
 }
 
+/*
+ * Note that setup_arch() may call add_device_randomness()
+ * long before we get here. This allows seeding of the pools
+ * with some platform dependent data very early in the boot
+ * process. But it limits our options here. We must use
+ * statically allocated structures that already have all
+ * initializations complete at compile time. We should also
+ * take care not to overwrite the precious per platform data
+ * we were given.
+ */
 static int rand_initialize(void)
 {
         init_std_data(&input_pool);
@@ -992,24 +1114,6 @@
 }
 module_init(rand_initialize);
 
-void rand_initialize_irq(int irq)
-{
-        struct timer_rand_state *state;
-
-        state = get_timer_rand_state(irq);
-
-        if (state)
-                return;
-
-        /*
-         * If kzalloc returns null, we just won't use that entropy
-         * source.
-         */
-        state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
-        if (state)
-                set_timer_rand_state(irq, state);
-}
-
 #ifdef CONFIG_BLOCK
 void rand_initialize_disk(struct gendisk *disk)
 {
@@ -1117,7 +1221,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
                 count -= bytes;
                 p += bytes;
 
-                mix_pool_bytes(r, buf, bytes);
+                mix_pool_bytes(r, buf, bytes, NULL);
                 cond_resched();
         }
 
@@ -1279,6 +1383,7 @@ static int proc_do_uuid(ctl_table *table, int write,
 }
 
 static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
+extern ctl_table random_table[];
 ctl_table random_table[] = {
         {
                 .procname       = "poolsize",
@@ -1344,7 +1449,7 @@ late_initcall(random_int_secret_init);
  * value is not cryptographically secure but for several uses the cost of
  * depleting entropy is too high
  */
-DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
+static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
 unsigned int get_random_int(void)
 {
         __u32 *hash;