author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2008-08-21 09:14:55 -0400
committer  Ingo Molnar <mingo@elte.hu>                     2008-08-21 10:01:02 -0400
commit     275a89bdd3868af3008852594d2e169eaf69441b (patch)
tree       81ad2537fe871a3e583013dabf6fdba539773332
parent     0c925d79234fe77589d8ff3861f9f8bb9e7fc3f6 (diff)
rcu: use irq-safe locks
Some earlier tip/core/rcu patches caused RCU to incorrectly enable irqs
too early in boot. This caused Yinghai's repeated-kexec testing to
hit oopses, presumably due to device interrupts left over from the
prior kernel instance (which would oops the newly booting kernel
before it got a chance to reset said devices). This patch therefore
converts all the local_irq_disable()s in rcuclassic.c to local_irq_save().
Besides, I never did like local_irq_disable() anyway. ;-)
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
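Why the conversion matters, in a nutshell: local_irq_save()/local_irq_restore()
put the CPU's interrupt flag back to whatever it was on entry, while
local_irq_disable()/local_irq_enable() force interrupts off and then
unconditionally back on. A helper using the latter pair therefore re-enables
interrupts even when its caller (early boot, in this case) still needs them
off. The user-space sketch below models that difference; it is illustrative
only, not kernel code, and the fake_* helpers are hypothetical stand-ins for
the real primitives.

/*
 * Illustrative user-space model (not kernel code) of the difference
 * between an unconditional disable/enable pair and a save/restore pair.
 * The fake_* names are hypothetical stand-ins for the real primitives.
 */
#include <stdbool.h>
#include <stdio.h>

static bool irqs_enabled = true;	/* models the CPU interrupt flag */

static void fake_irq_disable(void)        { irqs_enabled = false; }
static void fake_irq_enable(void)         { irqs_enabled = true; }
static void fake_irq_save(bool *flags)    { *flags = irqs_enabled; irqs_enabled = false; }
static void fake_irq_restore(bool flags)  { irqs_enabled = flags; }

/* Buggy pattern: unconditionally re-enables interrupts on exit. */
static void helper_disable_enable(void)
{
	fake_irq_disable();
	/* ... touch per-CPU state ... */
	fake_irq_enable();
}

/* Patched pattern: restores whatever state the caller had. */
static void helper_save_restore(void)
{
	bool flags;

	fake_irq_save(&flags);
	/* ... touch per-CPU state ... */
	fake_irq_restore(flags);
}

int main(void)
{
	/* Caller runs with interrupts disabled, as in early boot. */
	fake_irq_disable();
	helper_disable_enable();
	printf("after disable/enable helper: irqs %s (too early!)\n",
	       irqs_enabled ? "enabled" : "disabled");

	fake_irq_disable();
	helper_save_restore();
	printf("after save/restore helper:   irqs %s (unchanged)\n",
	       irqs_enabled ? "enabled" : "disabled");
	return 0;
}

Compiled and run, the first helper reports interrupts enabled even though the
caller had disabled them, while the second leaves the caller's state untouched.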
 kernel/rcuclassic.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index 01e761a6b38c..3f6918966bda 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -247,6 +247,7 @@ static inline void raise_rcu_softirq(void)
  */
 static void rcu_do_batch(struct rcu_data *rdp)
 {
+        unsigned long flags;
         struct rcu_head *next, *list;
         int count = 0;
 
@@ -261,9 +262,9 @@ static void rcu_do_batch(struct rcu_data *rdp)
         }
         rdp->donelist = list;
 
-        local_irq_disable();
+        local_irq_save(flags);
         rdp->qlen -= count;
-        local_irq_enable();
+        local_irq_restore(flags);
         if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
                 rdp->blimit = blimit;
 
@@ -464,12 +465,14 @@ static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
 static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
                                 struct rcu_head **tail, long batch)
 {
+        unsigned long flags;
+
         if (list) {
-                local_irq_disable();
+                local_irq_save(flags);
                 this_rdp->batch = batch;
                 *this_rdp->nxttail[2] = list;
                 this_rdp->nxttail[2] = tail;
-                local_irq_enable();
+                local_irq_restore(flags);
         }
 }
 
@@ -521,10 +524,11 @@ static void rcu_offline_cpu(int cpu)
 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
                                         struct rcu_data *rdp)
 {
+        unsigned long flags;
         long completed_snap;
 
         if (rdp->nxtlist) {
-                local_irq_disable();
+                local_irq_save(flags);
                 completed_snap = ACCESS_ONCE(rcp->completed);
 
                 /*
@@ -554,7 +558,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
                         rdp->nxttail[0] = &rdp->nxtlist;
                 }
 
-                local_irq_enable();
+                local_irq_restore(flags);
 
                 if (rcu_batch_after(rdp->batch, rcp->pending)) {
                         unsigned long flags;