path: root/kernel/rcutree.c
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--  kernel/rcutree.c  369
1 file changed, 213 insertions(+), 156 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 7717b95c2027..52b06f6e158c 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -25,7 +25,7 @@
25 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. 25 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
26 * 26 *
27 * For detailed explanation of Read-Copy Update mechanism see - 27 * For detailed explanation of Read-Copy Update mechanism see -
28 * Documentation/RCU 28 * Documentation/RCU
29 */ 29 */
30#include <linux/types.h> 30#include <linux/types.h>
31#include <linux/kernel.h> 31#include <linux/kernel.h>
@@ -35,6 +35,7 @@
35#include <linux/rcupdate.h> 35#include <linux/rcupdate.h>
36#include <linux/interrupt.h> 36#include <linux/interrupt.h>
37#include <linux/sched.h> 37#include <linux/sched.h>
38#include <linux/nmi.h>
38#include <asm/atomic.h> 39#include <asm/atomic.h>
39#include <linux/bitops.h> 40#include <linux/bitops.h>
40#include <linux/module.h> 41#include <linux/module.h>
@@ -46,6 +47,8 @@
46#include <linux/mutex.h> 47#include <linux/mutex.h>
47#include <linux/time.h> 48#include <linux/time.h>
48 49
50#include "rcutree.h"
51
49#ifdef CONFIG_DEBUG_LOCK_ALLOC 52#ifdef CONFIG_DEBUG_LOCK_ALLOC
50static struct lock_class_key rcu_lock_key; 53static struct lock_class_key rcu_lock_key;
51struct lockdep_map rcu_lock_map = 54struct lockdep_map rcu_lock_map =
@@ -72,30 +75,55 @@ EXPORT_SYMBOL_GPL(rcu_lock_map);
72 .n_force_qs_ngp = 0, \ 75 .n_force_qs_ngp = 0, \
73} 76}
74 77
75struct rcu_state rcu_state = RCU_STATE_INITIALIZER(rcu_state); 78struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched_state);
76DEFINE_PER_CPU(struct rcu_data, rcu_data); 79DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
77 80
78struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); 81struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
79DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); 82DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
80 83
84extern long rcu_batches_completed_sched(void);
85static struct rcu_node *rcu_get_root(struct rcu_state *rsp);
86static void cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp,
87 struct rcu_node *rnp, unsigned long flags);
88static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags);
89#ifdef CONFIG_HOTPLUG_CPU
90static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp);
91#endif /* #ifdef CONFIG_HOTPLUG_CPU */
92static void __rcu_process_callbacks(struct rcu_state *rsp,
93 struct rcu_data *rdp);
94static void __call_rcu(struct rcu_head *head,
95 void (*func)(struct rcu_head *rcu),
96 struct rcu_state *rsp);
97static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp);
98static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_state *rsp,
99 int preemptable);
100
101#include "rcutree_plugin.h"
102
81/* 103/*
82 * Increment the quiescent state counter. 104 * Note a quiescent state. Because we do not need to know
83 * The counter is a bit degenerated: We do not need to know
84 * how many quiescent states passed, just if there was at least 105 * how many quiescent states passed, just if there was at least
85 * one since the start of the grace period. Thus just a flag. 106 * one since the start of the grace period, this just sets a flag.
86 */ 107 */
87void rcu_qsctr_inc(int cpu) 108void rcu_sched_qs(int cpu)
88{ 109{
89 struct rcu_data *rdp = &per_cpu(rcu_data, cpu); 110 struct rcu_data *rdp;
90 rdp->passed_quiesc = 1; 111
112 rdp = &per_cpu(rcu_sched_data, cpu);
91 rdp->passed_quiesc_completed = rdp->completed; 113 rdp->passed_quiesc_completed = rdp->completed;
114 barrier();
115 rdp->passed_quiesc = 1;
116 rcu_preempt_note_context_switch(cpu);
92} 117}
93 118
94void rcu_bh_qsctr_inc(int cpu) 119void rcu_bh_qs(int cpu)
95{ 120{
96 struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); 121 struct rcu_data *rdp;
97 rdp->passed_quiesc = 1; 122
123 rdp = &per_cpu(rcu_bh_data, cpu);
98 rdp->passed_quiesc_completed = rdp->completed; 124 rdp->passed_quiesc_completed = rdp->completed;
125 barrier();
126 rdp->passed_quiesc = 1;
99} 127}
100 128
101#ifdef CONFIG_NO_HZ 129#ifdef CONFIG_NO_HZ
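The renamed rcu_sched_qs()/rcu_bh_qs() also reorder the two per-CPU stores and put a compiler barrier between them, so that ->passed_quiesc_completed is published before the ->passed_quiesc flag. A minimal userspace sketch of that pattern (the qs_record structure and barrier() macro here are stand-ins, not the kernel's types):

#include <stdio.h>

/* Stand-ins for the per-CPU rcu_data fields used by rcu_sched_qs(). */
struct qs_record {
        long completed;               /* last grace period seen complete */
        long passed_quiesc_completed; /* GP for which a QS was recorded  */
        int passed_quiesc;            /* flag: QS seen since GP started  */
};

/* Compiler barrier, as in the kernel's barrier(). */
#define barrier() __asm__ __volatile__("" ::: "memory")

/*
 * Record a quiescent state: publish which grace period it belongs to
 * before setting the flag, so anything that sees the flag also sees a
 * passed_quiesc_completed value at least that recent.
 */
static void note_quiescent_state(struct qs_record *q)
{
        q->passed_quiesc_completed = q->completed;
        barrier();              /* order the two stores at compile time */
        q->passed_quiesc = 1;
}

int main(void)
{
        struct qs_record q = { .completed = 42 };

        note_quiescent_state(&q);
        printf("QS recorded for GP %ld, flag=%d\n",
               q.passed_quiesc_completed, q.passed_quiesc);
        return 0;
}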
@@ -110,15 +138,16 @@ static int qhimark = 10000; /* If this many pending, ignore blimit. */
110static int qlowmark = 100; /* Once only this many pending, use blimit. */ 138static int qlowmark = 100; /* Once only this many pending, use blimit. */
111 139
112static void force_quiescent_state(struct rcu_state *rsp, int relaxed); 140static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
141static int rcu_pending(int cpu);
113 142
114/* 143/*
115 * Return the number of RCU batches processed thus far for debug & stats. 144 * Return the number of RCU-sched batches processed thus far for debug & stats.
116 */ 145 */
117long rcu_batches_completed(void) 146long rcu_batches_completed_sched(void)
118{ 147{
119 return rcu_state.completed; 148 return rcu_sched_state.completed;
120} 149}
121EXPORT_SYMBOL_GPL(rcu_batches_completed); 150EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
122 151
123/* 152/*
124 * Return the number of RCU BH batches processed thus far for debug & stats. 153 * Return the number of RCU BH batches processed thus far for debug & stats.
@@ -181,6 +210,10 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp)
181 return 1; 210 return 1;
182 } 211 }
183 212
213 /* If preemptable RCU, no point in sending reschedule IPI. */
214 if (rdp->preemptable)
215 return 0;
216
184 /* The CPU is online, so send it a reschedule IPI. */ 217 /* The CPU is online, so send it a reschedule IPI. */
185 if (rdp->cpu != smp_processor_id()) 218 if (rdp->cpu != smp_processor_id())
186 smp_send_reschedule(rdp->cpu); 219 smp_send_reschedule(rdp->cpu);
@@ -193,7 +226,6 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp)
193#endif /* #ifdef CONFIG_SMP */ 226#endif /* #ifdef CONFIG_SMP */
194 227
195#ifdef CONFIG_NO_HZ 228#ifdef CONFIG_NO_HZ
196static DEFINE_RATELIMIT_STATE(rcu_rs, 10 * HZ, 5);
197 229
198/** 230/**
199 * rcu_enter_nohz - inform RCU that current CPU is entering nohz 231 * rcu_enter_nohz - inform RCU that current CPU is entering nohz
@@ -213,7 +245,7 @@ void rcu_enter_nohz(void)
213 rdtp = &__get_cpu_var(rcu_dynticks); 245 rdtp = &__get_cpu_var(rcu_dynticks);
214 rdtp->dynticks++; 246 rdtp->dynticks++;
215 rdtp->dynticks_nesting--; 247 rdtp->dynticks_nesting--;
216 WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs); 248 WARN_ON_ONCE(rdtp->dynticks & 0x1);
217 local_irq_restore(flags); 249 local_irq_restore(flags);
218} 250}
219 251
@@ -232,7 +264,7 @@ void rcu_exit_nohz(void)
232 rdtp = &__get_cpu_var(rcu_dynticks); 264 rdtp = &__get_cpu_var(rcu_dynticks);
233 rdtp->dynticks++; 265 rdtp->dynticks++;
234 rdtp->dynticks_nesting++; 266 rdtp->dynticks_nesting++;
235 WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs); 267 WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
236 local_irq_restore(flags); 268 local_irq_restore(flags);
237 smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ 269 smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
238} 270}
@@ -251,7 +283,7 @@ void rcu_nmi_enter(void)
251 if (rdtp->dynticks & 0x1) 283 if (rdtp->dynticks & 0x1)
252 return; 284 return;
253 rdtp->dynticks_nmi++; 285 rdtp->dynticks_nmi++;
254 WARN_ON_RATELIMIT(!(rdtp->dynticks_nmi & 0x1), &rcu_rs); 286 WARN_ON_ONCE(!(rdtp->dynticks_nmi & 0x1));
255 smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ 287 smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
256} 288}
257 289
@@ -270,7 +302,7 @@ void rcu_nmi_exit(void)
270 return; 302 return;
271 smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */ 303 smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
272 rdtp->dynticks_nmi++; 304 rdtp->dynticks_nmi++;
273 WARN_ON_RATELIMIT(rdtp->dynticks_nmi & 0x1, &rcu_rs); 305 WARN_ON_ONCE(rdtp->dynticks_nmi & 0x1);
274} 306}
275 307
276/** 308/**
@@ -286,7 +318,7 @@ void rcu_irq_enter(void)
286 if (rdtp->dynticks_nesting++) 318 if (rdtp->dynticks_nesting++)
287 return; 319 return;
288 rdtp->dynticks++; 320 rdtp->dynticks++;
289 WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs); 321 WARN_ON_ONCE(!(rdtp->dynticks & 0x1));
290 smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ 322 smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
291} 323}
292 324
@@ -305,10 +337,10 @@ void rcu_irq_exit(void)
305 return; 337 return;
306 smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */ 338 smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
307 rdtp->dynticks++; 339 rdtp->dynticks++;
308 WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs); 340 WARN_ON_ONCE(rdtp->dynticks & 0x1);
309 341
310 /* If the interrupt queued a callback, get out of dyntick mode. */ 342 /* If the interrupt queued a callback, get out of dyntick mode. */
311 if (__get_cpu_var(rcu_data).nxtlist || 343 if (__get_cpu_var(rcu_sched_data).nxtlist ||
312 __get_cpu_var(rcu_bh_data).nxtlist) 344 __get_cpu_var(rcu_bh_data).nxtlist)
313 set_need_resched(); 345 set_need_resched();
314} 346}
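The WARN_ON_RATELIMIT() calls replaced above all check the same parity invariant: ->dynticks is incremented on every transition into or out of dynticks-idle, so an even value means the CPU is idle from RCU's point of view and an odd value means it is not. A toy userspace model of that invariant (structure and function names are illustrative only):

#include <assert.h>
#include <stdio.h>

/* Toy model of the per-CPU dynticks state. */
struct dynticks_model {
        long dynticks;          /* even: dynticks-idle, odd: non-idle */
        long dynticks_nesting;  /* process/irq nesting while non-idle */
};

static void enter_nohz(struct dynticks_model *d)
{
        d->dynticks++;
        d->dynticks_nesting--;
        assert(!(d->dynticks & 0x1));   /* must now be even (idle)    */
}

static void exit_nohz(struct dynticks_model *d)
{
        d->dynticks++;
        d->dynticks_nesting++;
        assert(d->dynticks & 0x1);      /* must now be odd (non-idle) */
}

int main(void)
{
        struct dynticks_model d = { .dynticks = 1, .dynticks_nesting = 1 };

        enter_nohz(&d);         /* CPU goes idle: counter becomes even */
        exit_nohz(&d);          /* CPU wakes up: counter is odd again  */
        printf("dynticks=%ld nesting=%ld\n", d.dynticks, d.dynticks_nesting);
        return 0;
}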
@@ -461,6 +493,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
461 493
462 printk(KERN_ERR "INFO: RCU detected CPU stalls:"); 494 printk(KERN_ERR "INFO: RCU detected CPU stalls:");
463 for (; rnp_cur < rnp_end; rnp_cur++) { 495 for (; rnp_cur < rnp_end; rnp_cur++) {
496 rcu_print_task_stall(rnp);
464 if (rnp_cur->qsmask == 0) 497 if (rnp_cur->qsmask == 0)
465 continue; 498 continue;
466 for (cpu = 0; cpu <= rnp_cur->grphi - rnp_cur->grplo; cpu++) 499 for (cpu = 0; cpu <= rnp_cur->grphi - rnp_cur->grplo; cpu++)
@@ -469,6 +502,8 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
469 } 502 }
470 printk(" (detected by %d, t=%ld jiffies)\n", 503 printk(" (detected by %d, t=%ld jiffies)\n",
471 smp_processor_id(), (long)(jiffies - rsp->gp_start)); 504 smp_processor_id(), (long)(jiffies - rsp->gp_start));
505 trigger_all_cpu_backtrace();
506
472 force_quiescent_state(rsp, 0); /* Kick them all. */ 507 force_quiescent_state(rsp, 0); /* Kick them all. */
473} 508}
474 509
@@ -479,12 +514,14 @@ static void print_cpu_stall(struct rcu_state *rsp)
479 514
480 printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu jiffies)\n", 515 printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu jiffies)\n",
481 smp_processor_id(), jiffies - rsp->gp_start); 516 smp_processor_id(), jiffies - rsp->gp_start);
482 dump_stack(); 517 trigger_all_cpu_backtrace();
518
483 spin_lock_irqsave(&rnp->lock, flags); 519 spin_lock_irqsave(&rnp->lock, flags);
484 if ((long)(jiffies - rsp->jiffies_stall) >= 0) 520 if ((long)(jiffies - rsp->jiffies_stall) >= 0)
485 rsp->jiffies_stall = 521 rsp->jiffies_stall =
486 jiffies + RCU_SECONDS_TILL_STALL_RECHECK; 522 jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
487 spin_unlock_irqrestore(&rnp->lock, flags); 523 spin_unlock_irqrestore(&rnp->lock, flags);
524
488 set_need_resched(); /* kick ourselves to get things going. */ 525 set_need_resched(); /* kick ourselves to get things going. */
489} 526}
490 527
@@ -564,8 +601,6 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
564{ 601{
565 struct rcu_data *rdp = rsp->rda[smp_processor_id()]; 602 struct rcu_data *rdp = rsp->rda[smp_processor_id()];
566 struct rcu_node *rnp = rcu_get_root(rsp); 603 struct rcu_node *rnp = rcu_get_root(rsp);
567 struct rcu_node *rnp_cur;
568 struct rcu_node *rnp_end;
569 604
570 if (!cpu_needs_another_gp(rsp, rdp)) { 605 if (!cpu_needs_another_gp(rsp, rdp)) {
571 spin_unlock_irqrestore(&rnp->lock, flags); 606 spin_unlock_irqrestore(&rnp->lock, flags);
@@ -574,6 +609,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
574 609
575 /* Advance to a new grace period and initialize state. */ 610 /* Advance to a new grace period and initialize state. */
576 rsp->gpnum++; 611 rsp->gpnum++;
612 WARN_ON_ONCE(rsp->signaled == RCU_GP_INIT);
577 rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */ 613 rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
578 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; 614 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
579 record_gp_stall_check_time(rsp); 615 record_gp_stall_check_time(rsp);
@@ -590,7 +626,9 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
590 626
591 /* Special-case the common single-level case. */ 627 /* Special-case the common single-level case. */
592 if (NUM_RCU_NODES == 1) { 628 if (NUM_RCU_NODES == 1) {
629 rcu_preempt_check_blocked_tasks(rnp);
593 rnp->qsmask = rnp->qsmaskinit; 630 rnp->qsmask = rnp->qsmaskinit;
631 rnp->gpnum = rsp->gpnum;
594 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */ 632 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
595 spin_unlock_irqrestore(&rnp->lock, flags); 633 spin_unlock_irqrestore(&rnp->lock, flags);
596 return; 634 return;
@@ -603,42 +641,28 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
603 spin_lock(&rsp->onofflock); /* irqs already disabled. */ 641 spin_lock(&rsp->onofflock); /* irqs already disabled. */
604 642
605 /* 643 /*
606 * Set the quiescent-state-needed bits in all the non-leaf RCU 644 * Set the quiescent-state-needed bits in all the rcu_node
607 * nodes for all currently online CPUs. This operation relies 645 * structures for all currently online CPUs in breadth-first
608 * on the layout of the hierarchy within the rsp->node[] array. 646 * order, starting from the root rcu_node structure. This
609 * Note that other CPUs will access only the leaves of the 647 * operation relies on the layout of the hierarchy within the
610 * hierarchy, which still indicate that no grace period is in 648 * rsp->node[] array. Note that other CPUs will access only
611 * progress. In addition, we have excluded CPU-hotplug operations. 649 * the leaves of the hierarchy, which still indicate that no
612 * 650 * grace period is in progress, at least until the corresponding
613 * We therefore do not need to hold any locks. Any required 651 * leaf node has been initialized. In addition, we have excluded
614 * memory barriers will be supplied by the locks guarding the 652 * CPU-hotplug operations.
615 * leaf rcu_nodes in the hierarchy.
616 */
617
618 rnp_end = rsp->level[NUM_RCU_LVLS - 1];
619 for (rnp_cur = &rsp->node[0]; rnp_cur < rnp_end; rnp_cur++)
620 rnp_cur->qsmask = rnp_cur->qsmaskinit;
621
622 /*
623 * Now set up the leaf nodes. Here we must be careful. First,
624 * we need to hold the lock in order to exclude other CPUs, which
625 * might be contending for the leaf nodes' locks. Second, as
626 * soon as we initialize a given leaf node, its CPUs might run
627 * up the rest of the hierarchy. We must therefore acquire locks
628 * for each node that we touch during this stage. (But we still
629 * are excluding CPU-hotplug operations.)
630 * 653 *
631 * Note that the grace period cannot complete until we finish 654 * Note that the grace period cannot complete until we finish
632 * the initialization process, as there will be at least one 655 * the initialization process, as there will be at least one
633 * qsmask bit set in the root node until that time, namely the 656 * qsmask bit set in the root node until that time, namely the
634 * one corresponding to this CPU. 657 * one corresponding to this CPU, due to the fact that we have
658 * irqs disabled.
635 */ 659 */
636 rnp_end = &rsp->node[NUM_RCU_NODES]; 660 for (rnp = &rsp->node[0]; rnp < &rsp->node[NUM_RCU_NODES]; rnp++) {
637 rnp_cur = rsp->level[NUM_RCU_LVLS - 1]; 661 spin_lock(&rnp->lock); /* irqs already disabled. */
638 for (; rnp_cur < rnp_end; rnp_cur++) { 662 rcu_preempt_check_blocked_tasks(rnp);
639 spin_lock(&rnp_cur->lock); /* irqs already disabled. */ 663 rnp->qsmask = rnp->qsmaskinit;
640 rnp_cur->qsmask = rnp_cur->qsmaskinit; 664 rnp->gpnum = rsp->gpnum;
641 spin_unlock(&rnp_cur->lock); /* irqs already disabled. */ 665 spin_unlock(&rnp->lock); /* irqs already disabled. */
642 } 666 }
643 667
644 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */ 668 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
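The rewritten initialization loop depends on rsp->node[] being laid out level by level, root first and leaves last, so a plain linear walk is a breadth-first traversal: every parent has its qsmask and gpnum set before any leaf does, and other CPUs, which consult only the leaves, never see a half-initialized grace period. A compact userspace illustration of that layout and walk (the two-level array and bit masks are invented for the example):

#include <stdio.h>

/* Toy rcu_node: one root followed by its leaves in a single flat array. */
struct toy_node {
        unsigned long qsmask;     /* CPUs still owing a QS this GP     */
        unsigned long qsmaskinit; /* CPUs present under this node      */
        long gpnum;               /* grace period this node is set for */
        int level;                /* 0 = root, 1 = leaf                */
};

#define NUM_NODES 4               /* 1 root + 3 leaves, root first     */

int main(void)
{
        struct toy_node node[NUM_NODES] = {
                { .qsmaskinit = 0x07, .level = 0 }, /* root: 3 child bits */
                { .qsmaskinit = 0x03, .level = 1 }, /* leaf: CPUs 0-1     */
                { .qsmaskinit = 0x0c, .level = 1 }, /* leaf: CPUs 2-3     */
                { .qsmaskinit = 0x30, .level = 1 }, /* leaf: CPUs 4-5     */
        };
        long gpnum = 100;                           /* the new grace period */

        /* A linear walk is breadth-first: the root is initialized first. */
        for (int i = 0; i < NUM_NODES; i++) {
                node[i].qsmask = node[i].qsmaskinit;
                node[i].gpnum = gpnum;
                printf("init node %d (level %d) qsmask=%#lx gpnum=%ld\n",
                       i, node[i].level, node[i].qsmask, node[i].gpnum);
        }
        return 0;
}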
@@ -674,6 +698,20 @@ rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
674} 698}
675 699
676/* 700/*
701 * Clean up after the prior grace period and let rcu_start_gp() start up
702 * the next grace period if one is needed. Note that the caller must
703 * hold rnp->lock, as required by rcu_start_gp(), which will release it.
704 */
705static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
706 __releases(rnp->lock)
707{
708 WARN_ON_ONCE(rsp->completed == rsp->gpnum);
709 rsp->completed = rsp->gpnum;
710 rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
711 rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
712}
713
714/*
677 * Similar to cpu_quiet(), for which it is a helper function. Allows 715 * Similar to cpu_quiet(), for which it is a helper function. Allows
678 * a group of CPUs to be quieted at one go, though all the CPUs in the 716 * a group of CPUs to be quieted at one go, though all the CPUs in the
679 * group must be represented by the same leaf rcu_node structure. 717 * group must be represented by the same leaf rcu_node structure.
@@ -685,6 +723,8 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
685 unsigned long flags) 723 unsigned long flags)
686 __releases(rnp->lock) 724 __releases(rnp->lock)
687{ 725{
726 struct rcu_node *rnp_c;
727
688 /* Walk up the rcu_node hierarchy. */ 728 /* Walk up the rcu_node hierarchy. */
689 for (;;) { 729 for (;;) {
690 if (!(rnp->qsmask & mask)) { 730 if (!(rnp->qsmask & mask)) {
@@ -694,7 +734,7 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
694 return; 734 return;
695 } 735 }
696 rnp->qsmask &= ~mask; 736 rnp->qsmask &= ~mask;
697 if (rnp->qsmask != 0) { 737 if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {
698 738
699 /* Other bits still set at this level, so done. */ 739 /* Other bits still set at this level, so done. */
700 spin_unlock_irqrestore(&rnp->lock, flags); 740 spin_unlock_irqrestore(&rnp->lock, flags);
@@ -708,28 +748,26 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
708 break; 748 break;
709 } 749 }
710 spin_unlock_irqrestore(&rnp->lock, flags); 750 spin_unlock_irqrestore(&rnp->lock, flags);
751 rnp_c = rnp;
711 rnp = rnp->parent; 752 rnp = rnp->parent;
712 spin_lock_irqsave(&rnp->lock, flags); 753 spin_lock_irqsave(&rnp->lock, flags);
754 WARN_ON_ONCE(rnp_c->qsmask);
713 } 755 }
714 756
715 /* 757 /*
716 * Get here if we are the last CPU to pass through a quiescent 758 * Get here if we are the last CPU to pass through a quiescent
717 * state for this grace period. Clean up and let rcu_start_gp() 759 * state for this grace period. Invoke cpu_quiet_msk_finish()
718 * start up the next grace period if one is needed. Note that 760 * to clean up and start the next grace period if one is needed.
719 * we still hold rnp->lock, as required by rcu_start_gp(), which
720 * will release it.
721 */ 761 */
722 rsp->completed = rsp->gpnum; 762 cpu_quiet_msk_finish(rsp, flags); /* releases rnp->lock. */
723 rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
724 rcu_start_gp(rsp, flags); /* releases rnp->lock. */
725} 763}
726 764
727/* 765/*
728 * Record a quiescent state for the specified CPU, which must either be 766 * Record a quiescent state for the specified CPU, which must either be
729 * the current CPU or an offline CPU. The lastcomp argument is used to 767 * the current CPU. The lastcomp argument is used to make sure we are
730 * make sure we are still in the grace period of interest. We don't want 768 * still in the grace period of interest. We don't want to end the current
731 * to end the current grace period based on quiescent states detected in 769 * grace period based on quiescent states detected in an earlier grace
732 * an earlier grace period! 770 * period!
733 */ 771 */
734static void 772static void
735cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp) 773cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
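cpu_quiet_msk() is the mirror image of that initialization: quiescent states are reported at a leaf and propagate toward the root only once a node's qsmask (and, with preemptible RCU, its set of blocked readers) is empty, and an empty root is what lets cpu_quiet_msk_finish() end the grace period. A simplified, lock-free userspace walk over a toy three-node tree:

#include <stdio.h>

struct toy_node {
        unsigned long qsmask;   /* CPUs/children still owing a QS        */
        unsigned long grpmask;  /* this node's bit in its parent's mask  */
        int parent;             /* index of parent node, -1 for the root */
};

/* One root (index 0) plus two leaves, each leaf owning two CPU bits. */
static struct toy_node node[] = {
        { .qsmask = 0x3, .parent = -1 },                /* root           */
        { .qsmask = 0x3, .grpmask = 0x1, .parent = 0 }, /* leaf: CPUs 0-1 */
        { .qsmask = 0x3, .grpmask = 0x2, .parent = 0 }, /* leaf: CPUs 2-3 */
};

/* Report quiescent states for 'mask', starting at leaf 'i'. */
static void report_qs(int i, unsigned long mask)
{
        for (;;) {
                node[i].qsmask &= ~mask;
                if (node[i].qsmask != 0)
                        return;                 /* others still pending here  */
                if (node[i].parent < 0) {
                        printf("root clear: grace period can end\n");
                        return;
                }
                mask = node[i].grpmask;         /* clear our bit one level up */
                i = node[i].parent;
        }
}

int main(void)
{
        report_qs(1, 0x3);      /* leaf 1 done: root still waits on leaf 2 */
        report_qs(2, 0x3);      /* leaf 2 done: root clears, GP can end    */
        return 0;
}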
@@ -764,7 +802,6 @@ cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
764 * This GP can't end until cpu checks in, so all of our 802 * This GP can't end until cpu checks in, so all of our
765 * callbacks can be processed during the next GP. 803 * callbacks can be processed during the next GP.
766 */ 804 */
767 rdp = rsp->rda[smp_processor_id()];
768 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; 805 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
769 806
770 cpu_quiet_msk(mask, rsp, rnp, flags); /* releases rnp->lock */ 807 cpu_quiet_msk(mask, rsp, rnp, flags); /* releases rnp->lock */
@@ -822,30 +859,28 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
822 spin_lock_irqsave(&rsp->onofflock, flags); 859 spin_lock_irqsave(&rsp->onofflock, flags);
823 860
824 /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */ 861 /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
825 rnp = rdp->mynode; 862 rnp = rdp->mynode; /* this is the outgoing CPU's rnp. */
826 mask = rdp->grpmask; /* rnp->grplo is constant. */ 863 mask = rdp->grpmask; /* rnp->grplo is constant. */
827 do { 864 do {
828 spin_lock(&rnp->lock); /* irqs already disabled. */ 865 spin_lock(&rnp->lock); /* irqs already disabled. */
829 rnp->qsmaskinit &= ~mask; 866 rnp->qsmaskinit &= ~mask;
830 if (rnp->qsmaskinit != 0) { 867 if (rnp->qsmaskinit != 0) {
831 spin_unlock(&rnp->lock); /* irqs already disabled. */ 868 spin_unlock(&rnp->lock); /* irqs remain disabled. */
832 break; 869 break;
833 } 870 }
871 rcu_preempt_offline_tasks(rsp, rnp, rdp);
834 mask = rnp->grpmask; 872 mask = rnp->grpmask;
835 spin_unlock(&rnp->lock); /* irqs already disabled. */ 873 spin_unlock(&rnp->lock); /* irqs remain disabled. */
836 rnp = rnp->parent; 874 rnp = rnp->parent;
837 } while (rnp != NULL); 875 } while (rnp != NULL);
838 lastcomp = rsp->completed; 876 lastcomp = rsp->completed;
839 877
840 spin_unlock(&rsp->onofflock); /* irqs remain disabled. */ 878 spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
841 879
842 /* Being offline is a quiescent state, so go record it. */
843 cpu_quiet(cpu, rsp, rdp, lastcomp);
844
845 /* 880 /*
846 * Move callbacks from the outgoing CPU to the running CPU. 881 * Move callbacks from the outgoing CPU to the running CPU.
847 * Note that the outgoing CPU is now quiscent, so it is now 882 * Note that the outgoing CPU is now quiscent, so it is now
848 * (uncharacteristically) safe to access it rcu_data structure. 883 * (uncharacteristically) safe to access its rcu_data structure.
849 * Note also that we must carefully retain the order of the 884 * Note also that we must carefully retain the order of the
850 * outgoing CPU's callbacks in order for rcu_barrier() to work 885 * outgoing CPU's callbacks in order for rcu_barrier() to work
851 * correctly. Finally, note that we start all the callbacks 886 * correctly. Finally, note that we start all the callbacks
@@ -876,8 +911,9 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
876 */ 911 */
877static void rcu_offline_cpu(int cpu) 912static void rcu_offline_cpu(int cpu)
878{ 913{
879 __rcu_offline_cpu(cpu, &rcu_state); 914 __rcu_offline_cpu(cpu, &rcu_sched_state);
880 __rcu_offline_cpu(cpu, &rcu_bh_state); 915 __rcu_offline_cpu(cpu, &rcu_bh_state);
916 rcu_preempt_offline_cpu(cpu);
881} 917}
882 918
883#else /* #ifdef CONFIG_HOTPLUG_CPU */ 919#else /* #ifdef CONFIG_HOTPLUG_CPU */
@@ -963,6 +999,8 @@ static void rcu_do_batch(struct rcu_data *rdp)
963 */ 999 */
964void rcu_check_callbacks(int cpu, int user) 1000void rcu_check_callbacks(int cpu, int user)
965{ 1001{
1002 if (!rcu_pending(cpu))
1003 return; /* if nothing for RCU to do. */
966 if (user || 1004 if (user ||
967 (idle_cpu(cpu) && rcu_scheduler_active && 1005 (idle_cpu(cpu) && rcu_scheduler_active &&
968 !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) { 1006 !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
@@ -971,17 +1009,16 @@ void rcu_check_callbacks(int cpu, int user)
971 * Get here if this CPU took its interrupt from user 1009 * Get here if this CPU took its interrupt from user
972 * mode or from the idle loop, and if this is not a 1010 * mode or from the idle loop, and if this is not a
973 * nested interrupt. In this case, the CPU is in 1011 * nested interrupt. In this case, the CPU is in
974 * a quiescent state, so count it. 1012 * a quiescent state, so note it.
975 * 1013 *
976 * No memory barrier is required here because both 1014 * No memory barrier is required here because both
977 * rcu_qsctr_inc() and rcu_bh_qsctr_inc() reference 1015 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
978 * only CPU-local variables that other CPUs neither 1016 * variables that other CPUs neither access nor modify,
979 * access nor modify, at least not while the corresponding 1017 * at least not while the corresponding CPU is online.
980 * CPU is online.
981 */ 1018 */
982 1019
983 rcu_qsctr_inc(cpu); 1020 rcu_sched_qs(cpu);
984 rcu_bh_qsctr_inc(cpu); 1021 rcu_bh_qs(cpu);
985 1022
986 } else if (!in_softirq()) { 1023 } else if (!in_softirq()) {
987 1024
@@ -989,11 +1026,12 @@ void rcu_check_callbacks(int cpu, int user)
989 * Get here if this CPU did not take its interrupt from 1026 * Get here if this CPU did not take its interrupt from
990 * softirq, in other words, if it is not interrupting 1027 * softirq, in other words, if it is not interrupting
991 * a rcu_bh read-side critical section. This is an _bh 1028 * a rcu_bh read-side critical section. This is an _bh
992 * critical section, so count it. 1029 * critical section, so note it.
993 */ 1030 */
994 1031
995 rcu_bh_qsctr_inc(cpu); 1032 rcu_bh_qs(cpu);
996 } 1033 }
1034 rcu_preempt_check_callbacks(cpu);
997 raise_softirq(RCU_SOFTIRQ); 1035 raise_softirq(RCU_SOFTIRQ);
998} 1036}
999 1037
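The scheduling-clock path above distinguishes two cases: a tick taken from user mode or the idle loop is a quiescent state for both rcu_sched and rcu_bh, while a tick that merely did not interrupt softirq context counts only for rcu_bh. A condensed sketch of that decision (the context flags and helper names are placeholders, not kernel interfaces):

#include <stdbool.h>
#include <stdio.h>

/* Placeholder flags describing the context the tick interrupted. */
struct tick_ctx {
        bool from_user;         /* interrupt taken from user mode          */
        bool from_idle;         /* interrupt taken from the idle loop      */
        bool in_softirq;        /* interrupted code was in softirq context */
};

static void note_sched_qs(int cpu) { printf("cpu %d: rcu_sched QS\n", cpu); }
static void note_bh_qs(int cpu)    { printf("cpu %d: rcu_bh QS\n", cpu); }

static void check_callbacks(int cpu, const struct tick_ctx *c)
{
        if (c->from_user || c->from_idle) {
                /* Not inside any read-side critical section: QS for both. */
                note_sched_qs(cpu);
                note_bh_qs(cpu);
        } else if (!c->in_softirq) {
                /* Not interrupting an rcu_bh reader: QS for rcu_bh only. */
                note_bh_qs(cpu);
        }
        /* ...then raise the RCU softirq to process callbacks. */
}

int main(void)
{
        struct tick_ctx user_tick = { .from_user = true };
        struct tick_ctx kern_tick = { .in_softirq = false };

        check_callbacks(0, &user_tick);
        check_callbacks(1, &kern_tick);
        return 0;
}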
@@ -1132,6 +1170,8 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
1132{ 1170{
1133 unsigned long flags; 1171 unsigned long flags;
1134 1172
1173 WARN_ON_ONCE(rdp->beenonline == 0);
1174
1135 /* 1175 /*
1136 * If an RCU GP has gone long enough, go check for dyntick 1176 * If an RCU GP has gone long enough, go check for dyntick
1137 * idle CPUs and, if needed, send resched IPIs. 1177 * idle CPUs and, if needed, send resched IPIs.
@@ -1170,8 +1210,10 @@ static void rcu_process_callbacks(struct softirq_action *unused)
1170 */ 1210 */
1171 smp_mb(); /* See above block comment. */ 1211 smp_mb(); /* See above block comment. */
1172 1212
1173 __rcu_process_callbacks(&rcu_state, &__get_cpu_var(rcu_data)); 1213 __rcu_process_callbacks(&rcu_sched_state,
1214 &__get_cpu_var(rcu_sched_data));
1174 __rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data)); 1215 __rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
1216 rcu_preempt_process_callbacks();
1175 1217
1176 /* 1218 /*
1177 * Memory references from any later RCU read-side critical sections 1219 * Memory references from any later RCU read-side critical sections
@@ -1227,13 +1269,13 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
1227} 1269}
1228 1270
1229/* 1271/*
1230 * Queue an RCU callback for invocation after a grace period. 1272 * Queue an RCU-sched callback for invocation after a grace period.
1231 */ 1273 */
1232void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) 1274void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
1233{ 1275{
1234 __call_rcu(head, func, &rcu_state); 1276 __call_rcu(head, func, &rcu_sched_state);
1235} 1277}
1236EXPORT_SYMBOL_GPL(call_rcu); 1278EXPORT_SYMBOL_GPL(call_rcu_sched);
1237 1279
1238/* 1280/*
1239 * Queue an RCU for invocation after a quicker grace period. 1281 * Queue an RCU for invocation after a quicker grace period.
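A typical caller of the newly named call_rcu_sched() unlinks an element while holding the update-side lock and defers its kfree() until all pre-existing RCU-sched readers have finished. The struct foo, foo_reclaim(), and foo_remove() below are invented for illustration; call_rcu_sched(), list_del_rcu(), and container_of() are the real interfaces:

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical RCU-protected element; only the rcu_head is required. */
struct foo {
        struct list_head list;
        int key;
        struct rcu_head rcu;
};

/* Runs after a grace period: no reader can still hold a reference. */
static void foo_reclaim(struct rcu_head *rcu)
{
        kfree(container_of(rcu, struct foo, rcu));
}

/* Caller holds the update-side lock protecting the list. */
static void foo_remove(struct foo *fp)
{
        list_del_rcu(&fp->list);                /* hide from new readers   */
        call_rcu_sched(&fp->rcu, foo_reclaim);  /* free once readers drain */
}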
@@ -1305,10 +1347,11 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
1305 * by the current CPU, returning 1 if so. This function is part of the 1347 * by the current CPU, returning 1 if so. This function is part of the
1306 * RCU implementation; it is -not- an exported member of the RCU API. 1348 * RCU implementation; it is -not- an exported member of the RCU API.
1307 */ 1349 */
1308int rcu_pending(int cpu) 1350static int rcu_pending(int cpu)
1309{ 1351{
1310 return __rcu_pending(&rcu_state, &per_cpu(rcu_data, cpu)) || 1352 return __rcu_pending(&rcu_sched_state, &per_cpu(rcu_sched_data, cpu)) ||
1311 __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)); 1353 __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)) ||
1354 rcu_preempt_pending(cpu);
1312} 1355}
1313 1356
1314/* 1357/*
@@ -1320,27 +1363,46 @@ int rcu_pending(int cpu)
1320int rcu_needs_cpu(int cpu) 1363int rcu_needs_cpu(int cpu)
1321{ 1364{
1322 /* RCU callbacks either ready or pending? */ 1365 /* RCU callbacks either ready or pending? */
1323 return per_cpu(rcu_data, cpu).nxtlist || 1366 return per_cpu(rcu_sched_data, cpu).nxtlist ||
1324 per_cpu(rcu_bh_data, cpu).nxtlist; 1367 per_cpu(rcu_bh_data, cpu).nxtlist ||
1368 rcu_preempt_needs_cpu(cpu);
1325} 1369}
1326 1370
1327/* 1371/*
1328 * Initialize a CPU's per-CPU RCU data. We take this "scorched earth" 1372 * Do boot-time initialization of a CPU's per-CPU RCU data.
1329 * approach so that we don't have to worry about how long the CPU has
1330 * been gone, or whether it ever was online previously. We do trust the
1331 * ->mynode field, as it is constant for a given struct rcu_data and
1332 * initialized during early boot.
1333 *
1334 * Note that only one online or offline event can be happening at a given
1335 * time. Note also that we can accept some slop in the rsp->completed
1336 * access due to the fact that this CPU cannot possibly have any RCU
1337 * callbacks in flight yet.
1338 */ 1373 */
1339static void __cpuinit 1374static void __init
1340rcu_init_percpu_data(int cpu, struct rcu_state *rsp) 1375rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
1341{ 1376{
1342 unsigned long flags; 1377 unsigned long flags;
1343 int i; 1378 int i;
1379 struct rcu_data *rdp = rsp->rda[cpu];
1380 struct rcu_node *rnp = rcu_get_root(rsp);
1381
1382 /* Set up local state, ensuring consistent view of global state. */
1383 spin_lock_irqsave(&rnp->lock, flags);
1384 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
1385 rdp->nxtlist = NULL;
1386 for (i = 0; i < RCU_NEXT_SIZE; i++)
1387 rdp->nxttail[i] = &rdp->nxtlist;
1388 rdp->qlen = 0;
1389#ifdef CONFIG_NO_HZ
1390 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
1391#endif /* #ifdef CONFIG_NO_HZ */
1392 rdp->cpu = cpu;
1393 spin_unlock_irqrestore(&rnp->lock, flags);
1394}
1395
1396/*
1397 * Initialize a CPU's per-CPU RCU data. Note that only one online or
1398 * offline event can be happening at a given time. Note also that we
1399 * can accept some slop in the rsp->completed access due to the fact
1400 * that this CPU cannot possibly have any RCU callbacks in flight yet.
1401 */
1402static void __cpuinit
1403rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
1404{
1405 unsigned long flags;
1344 long lastcomp; 1406 long lastcomp;
1345 unsigned long mask; 1407 unsigned long mask;
1346 struct rcu_data *rdp = rsp->rda[cpu]; 1408 struct rcu_data *rdp = rsp->rda[cpu];
@@ -1354,17 +1416,9 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
1354 rdp->passed_quiesc = 0; /* We could be racing with new GP, */ 1416 rdp->passed_quiesc = 0; /* We could be racing with new GP, */
1355 rdp->qs_pending = 1; /* so set up to respond to current GP. */ 1417 rdp->qs_pending = 1; /* so set up to respond to current GP. */
1356 rdp->beenonline = 1; /* We have now been online. */ 1418 rdp->beenonline = 1; /* We have now been online. */
1419 rdp->preemptable = preemptable;
1357 rdp->passed_quiesc_completed = lastcomp - 1; 1420 rdp->passed_quiesc_completed = lastcomp - 1;
1358 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
1359 rdp->nxtlist = NULL;
1360 for (i = 0; i < RCU_NEXT_SIZE; i++)
1361 rdp->nxttail[i] = &rdp->nxtlist;
1362 rdp->qlen = 0;
1363 rdp->blimit = blimit; 1421 rdp->blimit = blimit;
1364#ifdef CONFIG_NO_HZ
1365 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
1366#endif /* #ifdef CONFIG_NO_HZ */
1367 rdp->cpu = cpu;
1368 spin_unlock(&rnp->lock); /* irqs remain disabled. */ 1422 spin_unlock(&rnp->lock); /* irqs remain disabled. */
1369 1423
1370 /* 1424 /*
@@ -1387,34 +1441,21 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
1387 rnp = rnp->parent; 1441 rnp = rnp->parent;
1388 } while (rnp != NULL && !(rnp->qsmaskinit & mask)); 1442 } while (rnp != NULL && !(rnp->qsmaskinit & mask));
1389 1443
1390 spin_unlock(&rsp->onofflock); /* irqs remain disabled. */ 1444 spin_unlock_irqrestore(&rsp->onofflock, flags);
1391
1392 /*
1393 * A new grace period might start here. If so, we will be part of
1394 * it, and its gpnum will be greater than ours, so we will
1395 * participate. It is also possible for the gpnum to have been
1396 * incremented before this function was called, and the bitmasks
1397 * to not be filled out until now, in which case we will also
1398 * participate due to our gpnum being behind.
1399 */
1400
1401 /* Since it is coming online, the CPU is in a quiescent state. */
1402 cpu_quiet(cpu, rsp, rdp, lastcomp);
1403 local_irq_restore(flags);
1404} 1445}
1405 1446
1406static void __cpuinit rcu_online_cpu(int cpu) 1447static void __cpuinit rcu_online_cpu(int cpu)
1407{ 1448{
1408 rcu_init_percpu_data(cpu, &rcu_state); 1449 rcu_init_percpu_data(cpu, &rcu_sched_state, 0);
1409 rcu_init_percpu_data(cpu, &rcu_bh_state); 1450 rcu_init_percpu_data(cpu, &rcu_bh_state, 0);
1410 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); 1451 rcu_preempt_init_percpu_data(cpu);
1411} 1452}
1412 1453
1413/* 1454/*
1414 * Handle CPU online/offline notifcation events. 1455 * Handle CPU online/offline notification events.
1415 */ 1456 */
1416static int __cpuinit rcu_cpu_notify(struct notifier_block *self, 1457int __cpuinit rcu_cpu_notify(struct notifier_block *self,
1417 unsigned long action, void *hcpu) 1458 unsigned long action, void *hcpu)
1418{ 1459{
1419 long cpu = (long)hcpu; 1460 long cpu = (long)hcpu;
1420 1461
@@ -1486,6 +1527,7 @@ static void __init rcu_init_one(struct rcu_state *rsp)
1486 rnp = rsp->level[i]; 1527 rnp = rsp->level[i];
1487 for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) { 1528 for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
1488 spin_lock_init(&rnp->lock); 1529 spin_lock_init(&rnp->lock);
1530 rnp->gpnum = 0;
1489 rnp->qsmask = 0; 1531 rnp->qsmask = 0;
1490 rnp->qsmaskinit = 0; 1532 rnp->qsmaskinit = 0;
1491 rnp->grplo = j * cpustride; 1533 rnp->grplo = j * cpustride;
@@ -1503,16 +1545,20 @@ static void __init rcu_init_one(struct rcu_state *rsp)
1503 j / rsp->levelspread[i - 1]; 1545 j / rsp->levelspread[i - 1];
1504 } 1546 }
1505 rnp->level = i; 1547 rnp->level = i;
1548 INIT_LIST_HEAD(&rnp->blocked_tasks[0]);
1549 INIT_LIST_HEAD(&rnp->blocked_tasks[1]);
1506 } 1550 }
1507 } 1551 }
1508} 1552}
1509 1553
1510/* 1554/*
1511 * Helper macro for __rcu_init(). To be used nowhere else! 1555 * Helper macro for __rcu_init() and __rcu_init_preempt(). To be used
1512 * Assigns leaf node pointers into each CPU's rcu_data structure. 1556 * nowhere else! Assigns leaf node pointers into each CPU's rcu_data
1557 * structure.
1513 */ 1558 */
1514#define RCU_DATA_PTR_INIT(rsp, rcu_data) \ 1559#define RCU_INIT_FLAVOR(rsp, rcu_data) \
1515do { \ 1560do { \
1561 rcu_init_one(rsp); \
1516 rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \ 1562 rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \
1517 j = 0; \ 1563 j = 0; \
1518 for_each_possible_cpu(i) { \ 1564 for_each_possible_cpu(i) { \
@@ -1520,32 +1566,43 @@ do { \
1520 j++; \ 1566 j++; \
1521 per_cpu(rcu_data, i).mynode = &rnp[j]; \ 1567 per_cpu(rcu_data, i).mynode = &rnp[j]; \
1522 (rsp)->rda[i] = &per_cpu(rcu_data, i); \ 1568 (rsp)->rda[i] = &per_cpu(rcu_data, i); \
1569 rcu_boot_init_percpu_data(i, rsp); \
1523 } \ 1570 } \
1524} while (0) 1571} while (0)
1525 1572
1526static struct notifier_block __cpuinitdata rcu_nb = { 1573#ifdef CONFIG_TREE_PREEMPT_RCU
1527 .notifier_call = rcu_cpu_notify, 1574
1528}; 1575void __init __rcu_init_preempt(void)
1576{
1577 int i; /* All used by RCU_INIT_FLAVOR(). */
1578 int j;
1579 struct rcu_node *rnp;
1580
1581 RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data);
1582}
1583
1584#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1585
1586void __init __rcu_init_preempt(void)
1587{
1588}
1589
1590#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
1529 1591
1530void __init __rcu_init(void) 1592void __init __rcu_init(void)
1531{ 1593{
1532 int i; /* All used by RCU_DATA_PTR_INIT(). */ 1594 int i; /* All used by RCU_INIT_FLAVOR(). */
1533 int j; 1595 int j;
1534 struct rcu_node *rnp; 1596 struct rcu_node *rnp;
1535 1597
1536 printk(KERN_INFO "Hierarchical RCU implementation.\n"); 1598 rcu_bootup_announce();
1537#ifdef CONFIG_RCU_CPU_STALL_DETECTOR 1599#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
1538 printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n"); 1600 printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
1539#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ 1601#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
1540 rcu_init_one(&rcu_state); 1602 RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data);
1541 RCU_DATA_PTR_INIT(&rcu_state, rcu_data); 1603 RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data);
1542 rcu_init_one(&rcu_bh_state); 1604 __rcu_init_preempt();
1543 RCU_DATA_PTR_INIT(&rcu_bh_state, rcu_bh_data); 1605 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
1544
1545 for_each_online_cpu(i)
1546 rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long)i);
1547 /* Register notifier for non-boot CPUs */
1548 register_cpu_notifier(&rcu_nb);
1549} 1606}
1550 1607
1551module_param(blimit, int, 0); 1608module_param(blimit, int, 0);
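For reference, the per-CPU loop inside RCU_INIT_FLAVOR() above does nothing more than point each CPU's rcu_data at the leaf rcu_node covering it and record the back-pointer in rsp->rda[]. A stripped-down userspace model of that wiring (sizes and names are invented for the example):

#include <stdio.h>

#define NR_CPUS         6
#define CPUS_PER_LEAF   2
#define NR_LEAVES       (NR_CPUS / CPUS_PER_LEAF)

struct toy_node { int grplo, grphi; };          /* CPU range of a leaf */
struct toy_data { struct toy_node *mynode; };   /* per-CPU RCU state   */

int main(void)
{
        struct toy_node leaf[NR_LEAVES];
        struct toy_data data[NR_CPUS];
        struct toy_data *rda[NR_CPUS];          /* rsp->rda[] stand-in */
        int j = 0;

        for (int i = 0; i < NR_LEAVES; i++) {
                leaf[i].grplo = i * CPUS_PER_LEAF;
                leaf[i].grphi = leaf[i].grplo + CPUS_PER_LEAF - 1;
        }

        /* Advance to the next leaf once the CPU number passes grphi. */
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                if (cpu > leaf[j].grphi)
                        j++;
                data[cpu].mynode = &leaf[j];
                rda[cpu] = &data[cpu];
                printf("cpu %d -> leaf %d (CPUs %d-%d)\n", cpu, j,
                       rda[cpu]->mynode->grplo, rda[cpu]->mynode->grphi);
        }
        return 0;
}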