 kernel/rcuclassic.c | 36 ++++++++++++++++++++++--------------
 1 file changed, 22 insertions(+), 14 deletions(-)
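All of the changes below follow one pattern: every acquisition of rcp->lock via spin_lock() or spin_lock_bh() becomes spin_lock_irqsave(), with a matching irqrestore on release. The commit message is not shown here, but the usual motivation for this conversion is that the lock can now be taken from contexts where interrupts are disabled (or from interrupt context itself), so every holder must keep local interrupts off to avoid self-deadlock. A minimal sketch of the hazard, with placeholder names (example_lock stands in for rcp->lock; an illustration, not code from the patch):

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);	/* stand-in for rcp->lock */

	static void process_context_path(void)
	{
		unsigned long flags;

		/* Disable local interrupts and remember the prior state;
		 * an interrupt arriving here can no longer re-enter and
		 * deadlock on a lock we already hold. */
		spin_lock_irqsave(&example_lock, flags);
		/* ... critical section ... */
		spin_unlock_irqrestore(&example_lock, flags);
	}

	static void hardirq_path(void)
	{
		/* In a hardirq handler interrupts are already off, so the
		 * plain variants suffice. */
		spin_lock(&example_lock);
		/* ... */
		spin_unlock(&example_lock);
	}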
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index fb1f1cc45142..c6b6cf55f3e2 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -86,8 +86,10 @@ static void force_quiescent_state(struct rcu_data *rdp,
 {
 	int cpu;
 	cpumask_t cpumask;
+	unsigned long flags;
+
 	set_need_resched();
-	spin_lock(&rcp->lock);
+	spin_lock_irqsave(&rcp->lock, flags);
 	if (unlikely(!rcp->signaled)) {
 		rcp->signaled = 1;
 		/*
@@ -113,7 +115,7 @@ static void force_quiescent_state(struct rcu_data *rdp,
 		for_each_cpu_mask_nr(cpu, cpumask)
 			smp_send_reschedule(cpu);
 	}
-	spin_unlock(&rcp->lock);
+	spin_unlock_irqrestore(&rcp->lock, flags);
 }
 #else
 static inline void force_quiescent_state(struct rcu_data *rdp,
@@ -301,17 +303,18 @@ static void print_other_cpu_stall(struct rcu_ctrlblk *rcp)
 {
 	int cpu;
 	long delta;
+	unsigned long flags;
 
 	/* Only let one CPU complain about others per time interval. */
 
-	spin_lock(&rcp->lock);
+	spin_lock_irqsave(&rcp->lock, flags);
 	delta = get_seconds() - rcp->gp_check;
 	if (delta < 2L || cpus_empty(rcp->cpumask)) {
 		spin_unlock(&rcp->lock);
 		return;
 	}
 	rcp->gp_check = get_seconds() + 30;
-	spin_unlock(&rcp->lock);
+	spin_unlock_irqrestore(&rcp->lock, flags);
 
 	/* OK, time to rat on our buddy... */
 
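Review note: the early-return path in the hunk above is untouched context, so after this change it still drops the lock with a plain spin_unlock() even though the lock was taken with spin_lock_irqsave(). That path returns to the caller with interrupts left disabled. Unless a follow-up patch addresses it, the early return presumably wants the matching restore, along these lines (a sketch, not part of this patch):

	if (delta < 2L || cpus_empty(rcp->cpumask)) {
		spin_unlock_irqrestore(&rcp->lock, flags);
		return;
	}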
@@ -324,13 +327,15 @@ static void print_other_cpu_stall(struct rcu_ctrlblk *rcp)
 
 static void print_cpu_stall(struct rcu_ctrlblk *rcp)
 {
+	unsigned long flags;
+
 	printk(KERN_ERR "RCU detected CPU %d stall (t=%lu/%lu)\n",
 			smp_processor_id(), get_seconds(), rcp->gp_check);
 	dump_stack();
-	spin_lock(&rcp->lock);
+	spin_lock_irqsave(&rcp->lock, flags);
 	if ((long)(get_seconds() - rcp->gp_check) >= 0L)
 		rcp->gp_check = get_seconds() + 30;
-	spin_unlock(&rcp->lock);
+	spin_unlock_irqrestore(&rcp->lock, flags);
 }
 
 static void check_cpu_stall(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
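A side note on the re-arm check above: "(long)(get_seconds() - rcp->gp_check) >= 0L" is the wraparound-safe way to ask "has gp_check expired?". Comparing through a signed difference stays correct even if the counter wraps, where a direct "a >= b" would not; it is the same idiom the kernel's time_after_eq() macro uses for jiffies. A hypothetical helper spelling out the intent:

	/* Hypothetical helper; same idiom as the kernel's time_after_eq(). */
	#define seconds_after_eq(a, b)	((long)((a) - (b)) >= 0L)

	/* Equivalent to the check in print_cpu_stall() above: */
	if (seconds_after_eq(get_seconds(), rcp->gp_check))
		rcp->gp_check = get_seconds() + 30;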
@@ -413,6 +418,8 @@ static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
 static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
 					struct rcu_data *rdp)
 {
+	unsigned long flags;
+
 	if (rdp->quiescbatch != rcp->cur) {
 		/* start new grace period: */
 		rdp->qs_pending = 1;
@@ -436,7 +443,7 @@ static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
 		return;
 	rdp->qs_pending = 0;
 
-	spin_lock(&rcp->lock);
+	spin_lock_irqsave(&rcp->lock, flags);
 	/*
 	 * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
 	 * during cpu startup. Ignore the quiescent state.
@@ -444,7 +451,7 @@ static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
 	if (likely(rdp->quiescbatch == rcp->cur))
 		cpu_quiet(rdp->cpu, rcp);
 
-	spin_unlock(&rcp->lock);
+	spin_unlock_irqrestore(&rcp->lock, flags);
 }
 
 
@@ -469,21 +476,22 @@ static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
 static void __rcu_offline_cpu(struct rcu_data *this_rdp,
 				struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
 {
+	unsigned long flags;
+
 	/*
 	 * if the cpu going offline owns the grace period
 	 * we can block indefinitely waiting for it, so flush
 	 * it here
 	 */
-	spin_lock_bh(&rcp->lock);
+	spin_lock_irqsave(&rcp->lock, flags);
 	if (rcp->cur != rcp->completed)
 		cpu_quiet(rdp->cpu, rcp);
 	rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail, rcp->cur + 1);
 	rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail[2], rcp->cur + 1);
-	spin_unlock_bh(&rcp->lock);
+	spin_unlock(&rcp->lock);
 
-	local_irq_disable();
 	this_rdp->qlen += rdp->qlen;
-	local_irq_enable();
+	local_irq_restore(flags);
 }
 
 static void rcu_offline_cpu(int cpu)
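This hunk does more than swap lock variants. The old code had two separate protected regions: a _bh-locked section, then a local_irq_disable()/local_irq_enable() pair around the qlen merge. The new code covers both under a single irqsave region: the lock is released early with a plain spin_unlock(), leaving interrupts off until local_irq_restore() runs after the qlen update. A sketch of that "unlock early, restore interrupts late" shape, with placeholder names (not code from the patch):

	static DEFINE_SPINLOCK(ctrl_lock);	/* stand-in for rcp->lock */
	static long queue_len;			/* stand-in for this_rdp->qlen */

	static void offline_merge(long donated)
	{
		unsigned long flags;

		spin_lock_irqsave(&ctrl_lock, flags);	/* irqs off, lock held */
		/* ... work that needs the lock ... */
		spin_unlock(&ctrl_lock);		/* drop lock, irqs stay off */

		queue_len += donated;			/* needs only irqs off */
		local_irq_restore(flags);		/* caller's irq state back */
	}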
@@ -550,12 +558,12 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
 
 	if (rcu_batch_after(rdp->batch, rcp->pending)) {
 		/* and start it/schedule start if it's a new batch */
-		spin_lock(&rcp->lock);
+		spin_lock_irqsave(&rcp->lock, flags);
 		if (rcu_batch_after(rdp->batch, rcp->pending)) {
 			rcp->pending = rdp->batch;
 			rcu_start_batch(rcp);
 		}
-		spin_unlock(&rcp->lock);
+		spin_unlock_irqrestore(&rcp->lock, flags);
 	}
 }
 
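Two details worth noting in this last hunk. First, no "unsigned long flags;" declaration is added here, so flags is presumably already declared in __rcu_process_callbacks(); the hunk could not compile otherwise. Second, rcu_batch_after() is tested once without the lock and re-tested under it: the common case pays no locking cost, and the re-check closes the race with another CPU updating rcp->pending in between. A minimal sketch of that check/lock/re-check idiom, with hypothetical names:

	static DEFINE_SPINLOCK(state_lock);
	static long pending_batch;		/* stand-in for rcp->pending */

	static void maybe_start_batch(long batch)
	{
		unsigned long flags;

		if (batch > pending_batch) {		/* unlocked fast path */
			spin_lock_irqsave(&state_lock, flags);
			if (batch > pending_batch)	/* re-check under lock */
				pending_batch = batch;
			spin_unlock_irqrestore(&state_lock, flags);
		}
	}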
