author     Theodore Ts'o <tytso@mit.edu>    2012-07-02 07:52:16 -0400
committer  Theodore Ts'o <tytso@mit.edu>    2012-07-14 20:17:28 -0400
commit     775f4b297b780601e61787b766f306ed3e1d23eb
tree       e97e21669f0719636ccac0219f617bad6a0e8b54 /kernel/irq/handle.c
parent     74feec5dd83d879368c1081aec5b6a1cb6dd7ce2
random: make 'add_interrupt_randomness()' do something sane
We've been moving away from add_interrupt_randomness() for various
reasons: it's too expensive to do on every interrupt, and flooding the
CPU with interrupts could theoretically cause bogus floods of entropy
from a somewhat externally controllable source.
This solves both problems by limiting the actual randomness addition
to just once a second or after 64 interrupts, whichever comes first.
During that time, the interrupt cycle data is buffered up in a per-cpu
pool. Also, we make sure that the nonblocking pool used by urandom is
initialized before we start feeding the normal input pool. This
ensures that /dev/urandom returns unpredictable data as soon as
possible.
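To make the rate limiting concrete, here is a minimal userspace sketch of
the scheme described above. It is an illustrative model, not the kernel
code: the names fast_pool, mix_pool_bytes() and
sketch_add_interrupt_randomness() are stand-ins, and only the
64-interrupt / once-a-second flush policy is taken from this commit
message; the real per-cpu pool and mixing logic live in
drivers/char/random.c.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

/* Hypothetical per-CPU buffer; the real structure is in drivers/char/random.c. */
struct fast_pool {
	uint32_t pool[4];	/* buffered interrupt cycle data */
	unsigned int count;	/* interrupts seen since the last flush */
	time_t last_flush;	/* when we last fed the input pool */
};

/* Stand-in for the real "mix into the input pool" primitive. */
static void mix_pool_bytes(const void *buf, size_t len)
{
	printf("flushed %zu buffered bytes into the input pool\n", len);
	(void)buf;
}

/*
 * Called on every interrupt: cheap buffering every time, the expensive
 * mixing step at most once a second or every 64 interrupts.
 */
static void sketch_add_interrupt_randomness(struct fast_pool *fp,
					    int irq, uint32_t cycles)
{
	time_t now = time(NULL);

	/* Always fold the cycle counter and IRQ number into the per-CPU pool. */
	fp->pool[fp->count & 3] ^= cycles + (uint32_t)irq;
	fp->count++;

	/* Flush after 64 interrupts or once a second, whichever comes first. */
	if (fp->count < 64 && now - fp->last_flush < 1)
		return;

	mix_pool_bytes(fp->pool, sizeof(fp->pool));
	fp->count = 0;
	fp->last_flush = now;
}

int main(void)
{
	struct fast_pool fp;

	memset(&fp, 0, sizeof(fp));
	fp.last_flush = time(NULL);

	for (int i = 0; i < 200; i++)	/* simulate a burst of interrupts */
		sketch_add_interrupt_randomness(&fp, 16, (uint32_t)rand());
	return 0;
}

The point of buffering is that the per-interrupt cost drops to a couple
of XORs into a small per-cpu array, while the heavier pool mixing and
entropy accounting only run on the rate-limited path.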
(Based on an original patch by Linus, but significantly modified by
tytso.)
Tested-by: Eric Wustrow <ewust@umich.edu>
Reported-by: Eric Wustrow <ewust@umich.edu>
Reported-by: Nadia Heninger <nadiah@cs.ucsd.edu>
Reported-by: Zakir Durumeric <zakir@umich.edu>
Reported-by: J. Alex Halderman <jhalderm@umich.edu>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Cc: stable@vger.kernel.org
Diffstat (limited to 'kernel/irq/handle.c')
-rw-r--r--  kernel/irq/handle.c  7
1 file changed, 3 insertions, 4 deletions
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index bdb180325551..131ca176b497 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -133,7 +133,7 @@ irqreturn_t
 handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
 {
 	irqreturn_t retval = IRQ_NONE;
-	unsigned int random = 0, irq = desc->irq_data.irq;
+	unsigned int flags = 0, irq = desc->irq_data.irq;
 
 	do {
 		irqreturn_t res;
@@ -161,7 +161,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
 
 			/* Fall through to add to randomness */
 		case IRQ_HANDLED:
-			random |= action->flags;
+			flags |= action->flags;
 			break;
 
 		default:
@@ -172,8 +172,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
 		action = action->next;
 	} while (action);
 
-	if (random & IRQF_SAMPLE_RANDOM)
-		add_interrupt_randomness(irq);
+	add_interrupt_randomness(irq, flags);
 
 	if (!noirqdebug)
 		note_interrupt(irq, desc, retval);