author     Theodore Ts'o <tytso@mit.edu>    2012-07-02 07:52:16 -0400
committer  Theodore Ts'o <tytso@mit.edu>    2012-07-14 20:17:28 -0400
commit     775f4b297b780601e61787b766f306ed3e1d23eb
tree       e97e21669f0719636ccac0219f617bad6a0e8b54 /drivers/char/random.c
parent     74feec5dd83d879368c1081aec5b6a1cb6dd7ce2
random: make 'add_interrupt_randomness()' do something sane
We've been moving away from add_interrupt_randomness() for various
reasons: it's too expensive to do on every interrupt, and flooding the
CPU with interrupts could theoretically cause bogus floods of entropy
from a somewhat externally controllable source.

This solves both problems by limiting the actual randomness addition
to just once a second or after 64 interrupts, whichever comes first.
During that time, the interrupt cycle data is buffered up in a per-cpu
pool.

Also, we make sure the nonblocking pool used by urandom is initialized
before we start feeding the normal input pool.  This assures that
/dev/urandom is returning unpredictable data as soon as possible.

(Based on an original patch by Linus, but significantly modified by
tytso.)

Tested-by: Eric Wustrow <ewust@umich.edu>
Reported-by: Eric Wustrow <ewust@umich.edu>
Reported-by: Nadia Heninger <nadiah@cs.ucsd.edu>
Reported-by: Zakir Durumeric <zakir@umich.edu>
Reported-by: J. Alex Halderman <jhalderm@umich.edu>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Cc: stable@vger.kernel.org
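The interplay between the two limits is worth spelling out.  The diff
below masks fast_pool->count with 1023, but count is advanced once per
byte by fast_mix(), and each interrupt mixes a 16-byte sample, so the
mask fires exactly every 64 interrupts, as the changelog says.  A
minimal userspace sketch of the flush gate (illustrative only:
wall-clock seconds stand in for jiffies/HZ, and demo_fast_pool for the
kernel's per-cpu struct fast_pool):

/*
 * Standalone sketch of the flush gate used in the diff below.
 * fast_pool->count advances by 16 per interrupt (fast_mix() counts
 * bytes), so the 1023 mask fires once every 64 interrupts.
 */
#include <stdio.h>
#include <time.h>

struct demo_fast_pool {
        unsigned short count;   /* bytes mixed since the last flush */
        time_t last;            /* time of the last flush */
};

/* Mirrors: (fast_pool->count & 1023) && !time_after(now, last + HZ) */
static int should_flush(const struct demo_fast_pool *p, time_t now)
{
        if ((p->count & 1023) && now <= p->last + 1)
                return 0;       /* keep buffering in the per-cpu pool */
        return 1;               /* mix into a real pool, credit one bit */
}

int main(void)
{
        time_t now = time(NULL);
        struct demo_fast_pool p = { .count = 0, .last = now };
        int irqs;

        for (irqs = 1; irqs <= 64; irqs++) {
                p.count += 16;  /* one 16-byte sample per interrupt */
                if (should_flush(&p, now))
                        printf("flush after %d interrupts\n", irqs);
        }
        /* A quiet second also forces a flush, even with few samples: */
        p.count = 16;
        printf("stale sample: flush=%d\n", should_flush(&p, now + 2));
        return 0;
}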
Diffstat (limited to 'drivers/char/random.c')
 -rw-r--r--  drivers/char/random.c  103
 1 file changed, 86 insertions(+), 17 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index cb541b9a5231..9fcceace239c 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -127,19 +127,15 @@
  *
  * void add_input_randomness(unsigned int type, unsigned int code,
  *				unsigned int value);
- * void add_interrupt_randomness(int irq);
+ * void add_interrupt_randomness(int irq, int irq_flags);
  * void add_disk_randomness(struct gendisk *disk);
  *
  * add_input_randomness() uses the input layer interrupt timing, as well as
  * the event type information from the hardware.
  *
- * add_interrupt_randomness() uses the inter-interrupt timing as random
- * inputs to the entropy pool. Note that not all interrupts are good
- * sources of randomness! For example, the timer interrupts is not a
- * good choice, because the periodicity of the interrupts is too
- * regular, and hence predictable to an attacker. Network Interface
- * Controller interrupts are a better measure, since the timing of the
- * NIC interrupts are more unpredictable.
+ * add_interrupt_randomness() uses the interrupt timing as random
+ * inputs to the entropy pool.  Using the cycle counters and the irq source
+ * as inputs, it feeds the randomness roughly once a second.
  *
  * add_disk_randomness() uses what amounts to the seek time of block
  * layer request events, on a per-disk_devt basis, as input to the
@@ -248,6 +244,7 @@
 #include <linux/percpu.h>
 #include <linux/cryptohash.h>
 #include <linux/fips.h>
+#include <linux/ptrace.h>
 
 #ifdef CONFIG_GENERIC_HARDIRQS
 # include <linux/irq.h>
@@ -256,6 +253,7 @@
256#include <asm/processor.h> 253#include <asm/processor.h>
257#include <asm/uaccess.h> 254#include <asm/uaccess.h>
258#include <asm/irq.h> 255#include <asm/irq.h>
256#include <asm/irq_regs.h>
259#include <asm/io.h> 257#include <asm/io.h>
260 258
261/* 259/*
@@ -421,7 +419,9 @@ struct entropy_store {
 	spinlock_t lock;
 	unsigned add_ptr;
 	int entropy_count;
+	int entropy_total;
 	int input_rotate;
+	unsigned int initialized:1;
 	__u8 last_data[EXTRACT_SIZE];
 };
 
@@ -454,6 +454,10 @@ static struct entropy_store nonblocking_pool = {
454 .pool = nonblocking_pool_data 454 .pool = nonblocking_pool_data
455}; 455};
456 456
457static __u32 const twist_table[8] = {
458 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
459 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
460
457/* 461/*
458 * This function adds bytes into the entropy "pool". It does not 462 * This function adds bytes into the entropy "pool". It does not
459 * update the entropy estimate. The caller should call 463 * update the entropy estimate. The caller should call
@@ -467,9 +471,6 @@
 static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
 				   int nbytes, __u8 out[64])
 {
-	static __u32 const twist_table[8] = {
-		0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
-		0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
 	unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
 	int input_rotate;
 	int wordmask = r->poolinfo->poolwords - 1;
@@ -528,6 +529,36 @@ static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
 	mix_pool_bytes_extract(r, in, bytes, NULL);
 }
 
+struct fast_pool {
+	__u32		pool[4];
+	unsigned long	last;
+	unsigned short	count;
+	unsigned char	rotate;
+	unsigned char	last_timer_intr;
+};
+
+/*
+ * This is a fast mixing routine used by the interrupt randomness
+ * collector.  It's hardcoded for an 128 bit pool and assumes that any
+ * locks that might be needed are taken by the caller.
+ */
+static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
+{
+	const char	*bytes = in;
+	__u32		w;
+	unsigned	i = f->count;
+	unsigned	input_rotate = f->rotate;
+
+	while (nbytes--) {
+		w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
+			f->pool[(i + 1) & 3];
+		f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
+		input_rotate += (i++ & 3) ? 7 : 14;
+	}
+	f->count = i;
+	f->rotate = input_rotate;
+}
+
 /*
  * Credit (or debit) the entropy store with n bits of entropy
  */
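fast_mix() is self-contained enough to experiment with outside the
kernel.  The harness below is a sketch, not kernel code: rol32() is
reimplemented locally, stdint types stand in for __u32, and main()
with its sample input is invented for illustration; the mixing loop
itself is copied from the hunk above.

/*
 * Userspace harness for fast_mix() (sketch; rol32(), the __u32
 * typedef and main() are local stand-ins).
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

typedef uint32_t __u32;         /* stand-in for the kernel type */

static const __u32 twist_table[8] = {
        0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
        0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };

struct fast_pool {
        __u32 pool[4];
        unsigned long last;
        unsigned short count;
        unsigned char rotate;
        unsigned char last_timer_intr;
};

/* Local replacement for the kernel's rol32() bit-rotate helper. */
static __u32 rol32(__u32 word, unsigned int shift)
{
        return (word << shift) | (word >> ((32 - shift) & 31));
}

static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
{
        const char *bytes = in;
        __u32 w;
        unsigned i = f->count;
        unsigned input_rotate = f->rotate;

        while (nbytes--) {
                w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
                        f->pool[(i + 1) & 3];
                f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
                input_rotate += (i++ & 3) ? 7 : 14;
        }
        f->count = i;
        f->rotate = input_rotate;
}

int main(void)
{
        struct fast_pool f;
        __u32 sample[4] = { 0xdeadbeef, 19, 0x8103c5d0, 0xffffffff };

        memset(&f, 0, sizeof(f));
        fast_mix(&f, sample, sizeof(sample));
        printf("pool: %08x %08x %08x %08x\n",
               f.pool[0], f.pool[1], f.pool[2], f.pool[3]);
        return 0;
}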
@@ -551,6 +582,12 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
 		entropy_count = r->poolinfo->POOLBITS;
 	r->entropy_count = entropy_count;
 
+	if (!r->initialized && nbits > 0) {
+		r->entropy_total += nbits;
+		if (r->entropy_total > 128)
+			r->initialized = 1;
+	}
+
 	/* should we wake readers? */
 	if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
 		wake_up_interruptible(&random_read_wait);
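Note the threshold test above is a strict greater-than, so a pool that
is credited one bit per flush is not marked initialized until its
129th credit.  A minimal sketch of that bookkeeping (demo_store is a
stand-in for the relevant entropy_store fields):

/* Sketch of the initialization bookkeeping added above. */
#include <stdio.h>

struct demo_store {
        int entropy_total;
        unsigned int initialized;
};

static void credit(struct demo_store *r, int nbits)
{
        if (!r->initialized && nbits > 0) {
                r->entropy_total += nbits;
                if (r->entropy_total > 128)
                        r->initialized = 1;
        }
}

int main(void)
{
        struct demo_store nonblocking = { 0, 0 };
        int flushes = 0;

        /* One bit is credited per interrupt-pool flush. */
        while (!nonblocking.initialized) {
                credit(&nonblocking, 1);
                flushes++;
        }
        printf("initialized after %d single-bit credits\n", flushes);
        return 0;
}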
@@ -700,17 +737,48 @@ void add_input_randomness(unsigned int type, unsigned int code,
 }
 EXPORT_SYMBOL_GPL(add_input_randomness);
 
-void add_interrupt_randomness(int irq)
+static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
+
+void add_interrupt_randomness(int irq, int irq_flags)
 {
-	struct timer_rand_state *state;
+	struct entropy_store	*r;
+	struct fast_pool	*fast_pool = &__get_cpu_var(irq_randomness);
+	struct pt_regs		*regs = get_irq_regs();
+	unsigned long		now = jiffies;
+	__u32			input[4], cycles = get_cycles();
+
+	input[0] = cycles ^ jiffies;
+	input[1] = irq;
+	if (regs) {
+		__u64 ip = instruction_pointer(regs);
+		input[2] = ip;
+		input[3] = ip >> 32;
+	}
 
-	state = get_timer_rand_state(irq);
+	fast_mix(fast_pool, input, sizeof(input));
 
-	if (state == NULL)
+	if ((fast_pool->count & 1023) &&
+	    !time_after(now, fast_pool->last + HZ))
 		return;
 
-	DEBUG_ENT("irq event %d\n", irq);
-	add_timer_randomness(state, 0x100 + irq);
+	fast_pool->last = now;
+
+	r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
+	mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
+	/*
+	 * If we don't have a valid cycle counter, and we see
+	 * back-to-back timer interrupts, then skip giving credit for
+	 * any entropy.
+	 */
+	if (cycles == 0) {
+		if (irq_flags & __IRQF_TIMER) {
+			if (fast_pool->last_timer_intr)
+				return;
+			fast_pool->last_timer_intr = 1;
+		} else
+			fast_pool->last_timer_intr = 0;
+	}
+	credit_entropy_bits(r, 1);
 }
 
 #ifdef CONFIG_BLOCK
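To make the sample layout concrete: each interrupt contributes four
32-bit words, where most of the unpredictability is expected to come
from the cycle counter, with the irq number and the interrupted
instruction pointer mixed in to diversify samples across interrupt
sources.  A userspace sketch, with made-up stand-in values for
get_cycles(), jiffies and instruction_pointer():

/* How the 16-byte per-interrupt sample is assembled (all values are
 * invented stand-ins for illustration). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t cycles = 0x1d3a9f2c;        /* pretend get_cycles() */
        uint32_t jiffies_now = 0x0004c4b4;   /* pretend jiffies */
        int irq = 19;
        uint64_t ip = 0xffffffff8103c5d0ULL; /* pretend interrupted IP */
        uint32_t input[4];

        input[0] = cycles ^ jiffies_now;
        input[1] = (uint32_t)irq;
        input[2] = (uint32_t)ip;             /* low half of the IP */
        input[3] = (uint32_t)(ip >> 32);     /* high half of the IP */

        printf("sample: %08x %08x %08x %08x\n",
               input[0], input[1], input[2], input[3]);
        return 0;
}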
@@ -971,6 +1039,7 @@ static void init_std_data(struct entropy_store *r)
 
 	spin_lock_irqsave(&r->lock, flags);
 	r->entropy_count = 0;
+	r->entropy_total = 0;
 	spin_unlock_irqrestore(&r->lock, flags);
 
 	now = ktime_get_real();