path: root/drivers/char/random.c
author     Theodore Ts'o <tytso@mit.edu>    2014-06-14 03:06:57 -0400
committer  Theodore Ts'o <tytso@mit.edu>    2014-07-15 04:49:39 -0400
commit     840f95077ffd640df9c74ad9796fa094a5c8075a (patch)
tree       70cd554b7b5d7badbefdffc06067b28ad09fd749 /drivers/char/random.c
parent     cff850312cc7c0e0b9fe8b573687812dea232031 (diff)
random: clean up interrupt entropy accounting for archs w/o cycle counters
For architectures that don't have cycle counters, the algorithm for
deciding when to avoid giving entropy credit due to back-to-back timer
interrupts didn't make any sense, since we were checking every 64
interrupts.  Change it so that we only give an entropy credit if the
majority of the interrupts are not based on the timer.

Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Cc: George Spelvin <linux@horizon.com>
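As a rough, self-contained illustration of the accounting this patch moves to
(simplified names, no locking, no pool mixing, and the HZ-based early drain for
machines with a cycle counter is left out; struct fast_pool_sim and
account_interrupt are invented for this sketch, not kernel code):

#include <stdbool.h>

/* Simplified stand-in for the kernel's per-CPU struct fast_pool. */
struct fast_pool_sim {
	unsigned char count;		/* interrupts mixed since the last drain */
	unsigned char notimer_count;	/* how many of those were not timer irqs */
};

/*
 * Account one interrupt and, once 64 have been collected, decide how
 * much entropy to credit.  Returns -1 while still accumulating,
 * otherwise the credit (0 or 1 bit) for the drained batch.
 */
int account_interrupt(struct fast_pool_sim *fp, bool is_timer_irq,
		      bool have_cycle_counter)
{
	int credit = 0;

	fp->count++;
	if (!is_timer_irq)
		fp->notimer_count++;

	if (fp->count < 64)
		return -1;	/* keep collecting */

	/*
	 * Credit 1 bit only if a cycle counter contributed to the mix,
	 * or if at least half of the collected interrupts were
	 * non-timer interrupts.
	 */
	if (have_cycle_counter || fp->notimer_count >= 32)
		credit++;

	fp->count = fp->notimer_count = 0;
	return credit;
}

Under this scheme a batch of 64 back-to-back timer interrupts on a machine
without a cycle counter earns no credit at all, whereas the pre-patch code only
withheld credit when the interrupt that triggered the drain was a timer
interrupt two drains in a row.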
Diffstat (limited to 'drivers/char/random.c')
-rw-r--r--  drivers/char/random.c  42
1 file changed, 23 insertions, 19 deletions
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 364a8001a2bd..dfe918a21e32 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -548,9 +548,9 @@ static void mix_pool_bytes(struct entropy_store *r, const void *in,
 struct fast_pool {
 	__u32 pool[4];
 	unsigned long last;
-	unsigned short count;
+	unsigned char count;
+	unsigned char notimer_count;
 	unsigned char rotate;
-	unsigned char last_timer_intr;
 };
 
 /*
@@ -850,15 +850,23 @@ void add_interrupt_randomness(int irq, int irq_flags)
 	input[3] = ip >> 32;
 
 	fast_mix(fast_pool, input);
+	if ((irq_flags & __IRQF_TIMER) == 0)
+		fast_pool->notimer_count++;
 
-	if ((fast_pool->count & 63) && !time_after(now, fast_pool->last + HZ))
-		return;
+	if (cycles) {
+		if ((fast_pool->count < 64) &&
+		    !time_after(now, fast_pool->last + HZ))
+			return;
+	} else {
+		/* CPU does not have a cycle counting register :-( */
+		if (fast_pool->count < 64)
+			return;
+	}
 
 	r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
-	if (!spin_trylock(&r->lock)) {
-		fast_pool->count--;
+	if (!spin_trylock(&r->lock))
 		return;
-	}
+
 	fast_pool->last = now;
 	__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
 
@@ -874,19 +882,15 @@ void add_interrupt_randomness(int irq, int irq_flags)
 	spin_unlock(&r->lock);
 
 	/*
-	 * If we don't have a valid cycle counter, and we see
-	 * back-to-back timer interrupts, then skip giving credit for
-	 * any entropy, otherwise credit 1 bit.
+	 * If we have a valid cycle counter or if the majority of
+	 * interrupts collected were non-timer interrupts, then give
+	 * an entropy credit of 1 bit.  Yes, this is being very
+	 * conservative.
 	 */
-	credit++;
-	if (cycles == 0) {
-		if (irq_flags & __IRQF_TIMER) {
-			if (fast_pool->last_timer_intr)
-				credit--;
-			fast_pool->last_timer_intr = 1;
-		} else
-			fast_pool->last_timer_intr = 0;
-	}
+	if (cycles || (fast_pool->notimer_count >= 32))
+		credit++;
+
+	fast_pool->count = fast_pool->notimer_count = 0;
 
 	credit_entropy_bits(r, credit);
 }
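For a quick sanity check of the crediting rule described in the new comment
block, a hypothetical standalone harness (the would_credit helper is invented
here and simply mirrors the patched decision for one drained batch of 64
interrupts):

#include <stdbool.h>
#include <stdio.h>

/* Invented helper mirroring the patched credit decision for one batch. */
static bool would_credit(bool have_cycle_counter, unsigned char notimer_count)
{
	return have_cycle_counter || notimer_count >= 32;
}

int main(void)
{
	/* No cycle counter, all 64 interrupts came from the timer: no credit. */
	printf("all-timer batch: %d bit(s)\n", would_credit(false, 0));

	/* No cycle counter, half of the batch was non-timer: 1 bit credited. */
	printf("mixed batch:     %d bit(s)\n", would_credit(false, 32));

	/* With a cycle counter the batch is always worth 1 bit. */
	printf("cycle counter:   %d bit(s)\n", would_credit(true, 0));
	return 0;
}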