author    Theodore Ts'o <tytso@mit.edu>  2012-07-02 07:52:16 -0400
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2012-08-15 15:04:12 -0400
commit    aa88dea2270f685349ab7b92169600452fe73b62 (patch)
tree      b37b8609441ecb28a12e369795aea4e70bc6794c /kernel
parent    f5a1367c1bc150e70e8db9bb6f2892e8e31648c7 (diff)
random: make 'add_interrupt_randomness()' do something sane
commit 775f4b297b780601e61787b766f306ed3e1d23eb upstream.

We've been moving away from add_interrupt_randomness() for various
reasons: it's too expensive to do on every interrupt, and flooding the
CPU with interrupts could theoretically cause bogus floods of entropy
from a somewhat externally controllable source.

This solves both problems by limiting the actual randomness addition to
just once a second or after 64 interrupts, whichever comes first.
During that time, the interrupt cycle data is buffered up in a per-cpu
pool. Also, we make sure the nonblocking pool used by urandom is
initialized before we start feeding the normal input pool. This assures
that /dev/urandom is returning unpredictable data as soon as possible.

(Based on an original patch by Linus, but significantly modified by
tytso.)

Tested-by: Eric Wustrow <ewust@umich.edu>
Reported-by: Eric Wustrow <ewust@umich.edu>
Reported-by: Nadia Heninger <nadiah@cs.ucsd.edu>
Reported-by: Zakir Durumeric <zakir@umich.edu>
Reported-by: J. Alex Halderman <jhalderm@umich.edu>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
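As a rough illustration of the buffering scheme the message describes,
here is a minimal user-space C sketch, not the kernel's implementation:
fast_pool, fast_mix, flush_to_input_pool, and add_event are invented
stand-ins (the real code lives in drivers/char/random.c and works with
per-cpu variables, jiffies, and the CPU cycle counter). Each simulated
interrupt is cheaply mixed into a small buffer, and the buffer is only
flushed to the main pool once a second or every 64 events, whichever
comes first.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical per-CPU buffer; one instance stands in for one CPU. */
struct fast_pool {
	uint32_t pool[4];	/* buffered interrupt timing data   */
	unsigned long last;	/* time (seconds) of the last flush */
	unsigned int count;	/* events mixed in since that flush */
};

/* Cheap, non-cryptographic mix of a new sample into the buffer. */
static void fast_mix(struct fast_pool *f, const uint32_t input[4])
{
	for (int i = 0; i < 4; i++) {
		f->pool[i] ^= input[i];
		f->pool[i] = (f->pool[i] << 7) | (f->pool[i] >> 25);
	}
	f->count++;
}

/* Stand-in for feeding the real input pool. */
static void flush_to_input_pool(const struct fast_pool *f)
{
	printf("flush after %u events: %08x %08x %08x %08x\n",
	       f->count, f->pool[0], f->pool[1], f->pool[2], f->pool[3]);
}

/* Called once per simulated interrupt: always buffer, rarely flush. */
static void add_event(struct fast_pool *f, unsigned int irq,
		      uint32_t cycles)
{
	unsigned long now = (unsigned long)time(NULL);
	uint32_t input[4] = { cycles, irq, (uint32_t)now, cycles >> 16 };

	fast_mix(f, input);

	/* Rate limit: flush only after 64 events or after a second
	 * has elapsed, whichever comes first. */
	if ((f->count & 63) && now <= f->last)
		return;

	flush_to_input_pool(f);
	f->last = now;
	f->count = 0;
}

int main(void)
{
	struct fast_pool f = { {0}, 0, 0 };

	/* Simulate a burst of interrupts on IRQ 16; only every 64th
	 * event reaches flush_to_input_pool(). */
	for (unsigned int i = 0; i < 200; i++)
		add_event(&f, 16, 0x1000u + 37u * i);
	return 0;
}

The point of the rate limit is visible in add_event(): an attacker who
floods the CPU with interrupts can make fast_mix() run often, but can
only influence how the buffered data is credited once per second (or
per 64 events), which is what the commit message means by limiting the
"actual randomness addition".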
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/irq/handle.c | 7
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 470d08c82bb..10e077289c8 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -117,7 +117,7 @@ irqreturn_t
 handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
 {
 	irqreturn_t retval = IRQ_NONE;
-	unsigned int random = 0, irq = desc->irq_data.irq;
+	unsigned int flags = 0, irq = desc->irq_data.irq;
 
 	do {
 		irqreturn_t res;
@@ -145,7 +145,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
 
 			/* Fall through to add to randomness */
 		case IRQ_HANDLED:
 			flags |= action->flags;
 			break;
 
 		default:
@@ -156,8 +156,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
 		action = action->next;
 	} while (action);
 
-	if (random & IRQF_SAMPLE_RANDOM)
-		add_interrupt_randomness(irq);
+	add_interrupt_randomness(irq, flags);
 
 	if (!noirqdebug)
 		note_interrupt(irq, desc, retval);
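Taken together, the three hunks rename the local accumulator from
'random' to 'flags', drop the IRQF_SAMPLE_RANDOM gate, and call
add_interrupt_randomness() unconditionally, forwarding the OR of every
handler's action->flags. In the upstream commit this backports, the
callee uses those flags to recognize timer interrupts (__IRQF_TIMER)
so it can withhold entropy credit for back-to-back timer ticks when no
cycle counter is available.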