Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--   kernel/rcutree.c   292
1 file changed, 186 insertions(+), 106 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index ba06207b1dd3..6b76d812740c 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -38,7 +38,7 @@
 #include <linux/nmi.h>
 #include <linux/atomic.h>
 #include <linux/bitops.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/completion.h>
 #include <linux/moduleparam.h>
 #include <linux/percpu.h>
@@ -52,13 +52,16 @@
 #include <linux/prefetch.h>
 
 #include "rcutree.h"
+#include <trace/events/rcu.h>
+
+#include "rcu.h"
 
 /* Data structures. */
 
 static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];
 
 #define RCU_STATE_INITIALIZER(structname) { \
-        .level = { &structname.node[0] }, \
+        .level = { &structname##_state.node[0] }, \
         .levelcnt = { \
                 NUM_RCU_LVL_0,  /* root of hierarchy. */ \
                 NUM_RCU_LVL_1, \
@@ -69,17 +72,17 @@ static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];
         .signaled = RCU_GP_IDLE, \
         .gpnum = -300, \
         .completed = -300, \
-        .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname.onofflock), \
-        .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&structname.fqslock), \
+        .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.onofflock), \
+        .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.fqslock), \
         .n_force_qs = 0, \
         .n_force_qs_ngp = 0, \
         .name = #structname, \
 }
 
-struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched_state);
+struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched);
 DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
 
-struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
+struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh);
 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
 static struct rcu_state *rcu_state;
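
The RCU_STATE_INITIALIZER() change above switches the macro to token pasting: callers now pass the short flavor name (rcu_sched, rcu_bh) and the macro appends "_state" itself, so ".name = #structname" stringizes to the short name the new tracepoints print. A minimal standalone sketch of the same preprocessor idiom, using hypothetical demo/demo_state names:

        #include <stdio.h>

        struct demo_state {
                const char *name;
                int *level;
                int node[1];
        };

        /* ## pastes "_state" onto the short name to form the variable name;
         * # stringizes just the short name. Self-reference in the
         * initializer is legal C: the object is in scope for address-taking. */
        #define DEMO_STATE_INITIALIZER(structname) {            \
                .level = &structname##_state.node[0],           \
                .name = #structname,                            \
        }

        struct demo_state demo_state = DEMO_STATE_INITIALIZER(demo);

        int main(void)
        {
                printf("%s\n", demo_state.name);        /* prints "demo" */
                return 0;
        }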
@@ -128,8 +131,6 @@ static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
 static void invoke_rcu_core(void);
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
 
-#define RCU_KTHREAD_PRIO 1      /* RT priority for per-CPU kthreads. */
-
 /*
  * Track the rcutorture test sequence number and the update version
  * number within a given test.  The rcutorture_testseq is incremented
@@ -156,33 +157,41 @@ static int rcu_gp_in_progress(struct rcu_state *rsp)
  * Note a quiescent state.  Because we do not need to know
  * how many quiescent states passed, just if there was at least
  * one since the start of the grace period, this just sets a flag.
+ * The caller must have disabled preemption.
  */
 void rcu_sched_qs(int cpu)
 {
         struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);
 
-        rdp->passed_quiesc_completed = rdp->gpnum - 1;
+        rdp->passed_quiesce_gpnum = rdp->gpnum;
         barrier();
-        rdp->passed_quiesc = 1;
+        if (rdp->passed_quiesce == 0)
+                trace_rcu_grace_period("rcu_sched", rdp->gpnum, "cpuqs");
+        rdp->passed_quiesce = 1;
 }
 
 void rcu_bh_qs(int cpu)
 {
         struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
 
-        rdp->passed_quiesc_completed = rdp->gpnum - 1;
+        rdp->passed_quiesce_gpnum = rdp->gpnum;
         barrier();
-        rdp->passed_quiesc = 1;
+        if (rdp->passed_quiesce == 0)
+                trace_rcu_grace_period("rcu_bh", rdp->gpnum, "cpuqs");
+        rdp->passed_quiesce = 1;
 }
 
 /*
  * Note a context switch.  This is a quiescent state for RCU-sched,
  * and requires special handling for preemptible RCU.
+ * The caller must have disabled preemption.
  */
 void rcu_note_context_switch(int cpu)
 {
+        trace_rcu_utilization("Start context switch");
         rcu_sched_qs(cpu);
         rcu_preempt_note_context_switch(cpu);
+        trace_rcu_utilization("End context switch");
 }
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 
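
Both quiescent-state functions above write the gpnum snapshot first, then barrier(), then the flag. Because the recorder and the consumer run on the same CPU (the consumer typically in interrupt or softirq context), a compiler barrier suffices: any local code that sees passed_quiesce == 1 also sees the matching passed_quiesce_gpnum. A hedged sketch of that record-then-flag idiom, with illustrative names rather than the rcutree.c ones:

        /* Publish the data word strictly before the flag that validates it. */
        #define barrier() __asm__ __volatile__("" : : : "memory")

        struct qs_record {
                unsigned long gpnum_snap;       /* written first */
                int passed;                     /* written last */
        };

        static void note_qs(struct qs_record *r, unsigned long gpnum)
        {
                r->gpnum_snap = gpnum;
                barrier();      /* keep the compiler from reordering the stores */
                r->passed = 1;
        }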
@@ -193,7 +202,7 @@ DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 };
 #endif /* #ifdef CONFIG_NO_HZ */
 
-static int blimit = 10;         /* Maximum callbacks per softirq. */
+static int blimit = 10;         /* Maximum callbacks per rcu_do_batch. */
 static int qhimark = 10000;     /* If this many pending, ignore blimit. */
 static int qlowmark = 100;      /* Once only this many pending, use blimit. */
 
@@ -314,6 +323,7 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp)
          * trust its state not to change because interrupts are disabled.
          */
         if (cpu_is_offline(rdp->cpu)) {
+                trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "ofl");
                 rdp->offline_fqs++;
                 return 1;
         }
@@ -354,19 +364,13 @@ void rcu_enter_nohz(void)
                 local_irq_restore(flags);
                 return;
         }
+        trace_rcu_dyntick("Start");
         /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
         smp_mb__before_atomic_inc();  /* See above. */
         atomic_inc(&rdtp->dynticks);
         smp_mb__after_atomic_inc();  /* Force ordering with next sojourn. */
         WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
         local_irq_restore(flags);
-
-        /* If the interrupt queued a callback, get out of dyntick mode. */
-        if (in_irq() &&
-            (__get_cpu_var(rcu_sched_data).nxtlist ||
-             __get_cpu_var(rcu_bh_data).nxtlist ||
-             rcu_preempt_needs_cpu(smp_processor_id())))
-                set_need_resched();
 }
 
 /*
@@ -391,6 +395,7 @@ void rcu_exit_nohz(void)
         /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
         smp_mb__after_atomic_inc();  /* See above. */
         WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+        trace_rcu_dyntick("End");
         local_irq_restore(flags);
 }
 
@@ -481,11 +486,11 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
  */
 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 {
-        unsigned long curr;
-        unsigned long snap;
+        unsigned int curr;
+        unsigned int snap;
 
-        curr = (unsigned long)atomic_add_return(0, &rdp->dynticks->dynticks);
-        snap = (unsigned long)rdp->dynticks_snap;
+        curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
+        snap = (unsigned int)rdp->dynticks_snap;
 
         /*
          * If the CPU passed through or entered a dynticks idle phase with
@@ -495,7 +500,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
          * read-side critical section that started before the beginning
          * of the current RCU grace period.
          */
-        if ((curr & 0x1) == 0 || ULONG_CMP_GE(curr, snap + 2)) {
+        if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) {
+                trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "dti");
                 rdp->dynticks_fqs++;
                 return 1;
         }
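
Switching from ULONG_CMP_GE() to UINT_CMP_GE() matters because the dynticks counter is an atomic_t: the comparison has to wrap at 32 bits, and widening the operands to unsigned long would mishandle wraparound on 64-bit kernels. The kernel defines these helpers roughly as below (an assumption worth checking against include/linux/rcupdate.h); the test exercises the wraparound case:

        #include <assert.h>
        #include <limits.h>

        /* "a >= b" in 32-bit modular arithmetic: a - b, computed mod 2^32,
         * falls in the lower half of the range. */
        #define UINT_CMP_GE(a, b)       (UINT_MAX / 2 >= (a) - (b))

        int main(void)
        {
                unsigned int snap = UINT_MAX;   /* counter about to wrap */
                unsigned int curr = snap + 2;   /* wraps around to 1 */

                assert(UINT_CMP_GE(curr, snap + 2));    /* wrap handled */
                assert(!UINT_CMP_GE(snap, snap + 2));   /* no false positive */
                return 0;
        }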
@@ -537,6 +543,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
         int cpu;
         long delta;
         unsigned long flags;
+        int ndetected;
         struct rcu_node *rnp = rcu_get_root(rsp);
 
         /* Only let one CPU complain about others per time interval. */
@@ -553,7 +560,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
          * Now rat on any tasks that got kicked up to the root rcu_node
          * due to CPU offlining.
          */
-        rcu_print_task_stall(rnp);
+        ndetected = rcu_print_task_stall(rnp);
         raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
         /*
@@ -565,17 +572,22 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
                rsp->name);
         rcu_for_each_leaf_node(rsp, rnp) {
                 raw_spin_lock_irqsave(&rnp->lock, flags);
-                rcu_print_task_stall(rnp);
+                ndetected += rcu_print_task_stall(rnp);
                 raw_spin_unlock_irqrestore(&rnp->lock, flags);
                 if (rnp->qsmask == 0)
                         continue;
                 for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
-                        if (rnp->qsmask & (1UL << cpu))
+                        if (rnp->qsmask & (1UL << cpu)) {
                                 printk(" %d", rnp->grplo + cpu);
+                                ndetected++;
+                        }
         }
         printk("} (detected by %d, t=%ld jiffies)\n",
                smp_processor_id(), (long)(jiffies - rsp->gp_start));
-        trigger_all_cpu_backtrace();
+        if (ndetected == 0)
+                printk(KERN_ERR "INFO: Stall ended before state dump start\n");
+        else if (!trigger_all_cpu_backtrace())
+                dump_stack();
 
         /* If so configured, complain about tasks blocking the grace period. */
 
@@ -596,7 +608,8 @@ static void print_cpu_stall(struct rcu_state *rsp)
          */
         printk(KERN_ERR "INFO: %s detected stall on CPU %d (t=%lu jiffies)\n",
                rsp->name, smp_processor_id(), jiffies - rsp->gp_start);
-        trigger_all_cpu_backtrace();
+        if (!trigger_all_cpu_backtrace())
+                dump_stack();
 
         raw_spin_lock_irqsave(&rnp->lock, flags);
         if (ULONG_CMP_GE(jiffies, rsp->jiffies_stall))
@@ -678,9 +691,10 @@ static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct
          * go looking for one.
          */
         rdp->gpnum = rnp->gpnum;
+        trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpustart");
         if (rnp->qsmask & rdp->grpmask) {
                 rdp->qs_pending = 1;
-                rdp->passed_quiesc = 0;
+                rdp->passed_quiesce = 0;
         } else
                 rdp->qs_pending = 0;
 }
@@ -741,6 +755,7 @@ __rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_dat
 
         /* Remember that we saw this grace-period completion. */
         rdp->completed = rnp->completed;
+        trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuend");
 
         /*
          * If we were in an extended quiescent state, we may have
@@ -826,31 +841,31 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
         struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
         struct rcu_node *rnp = rcu_get_root(rsp);
 
-        if (!cpu_needs_another_gp(rsp, rdp) || rsp->fqs_active) {
-                if (cpu_needs_another_gp(rsp, rdp))
-                        rsp->fqs_need_gp = 1;
-                if (rnp->completed == rsp->completed) {
-                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
-                        return;
-                }
-                raw_spin_unlock(&rnp->lock);     /* irqs remain disabled. */
+        if (!rcu_scheduler_fully_active ||
+            !cpu_needs_another_gp(rsp, rdp)) {
+                /*
+                 * Either the scheduler hasn't yet spawned the first
+                 * non-idle task or this CPU does not need another
+                 * grace period.  Either way, don't start a new grace
+                 * period.
+                 */
+                raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                return;
+        }
 
+        if (rsp->fqs_active) {
                 /*
-                 * Propagate new ->completed value to rcu_node structures
-                 * so that other CPUs don't have to wait until the start
-                 * of the next grace period to process their callbacks.
+                 * This CPU needs a grace period, but force_quiescent_state()
+                 * is running.  Tell it to start one on this CPU's behalf.
                  */
-                rcu_for_each_node_breadth_first(rsp, rnp) {
-                        raw_spin_lock(&rnp->lock); /* irqs already disabled. */
-                        rnp->completed = rsp->completed;
-                        raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
-                }
-                local_irq_restore(flags);
+                rsp->fqs_need_gp = 1;
+                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                 return;
         }
 
         /* Advance to a new grace period and initialize state. */
         rsp->gpnum++;
+        trace_rcu_grace_period(rsp->name, rsp->gpnum, "start");
         WARN_ON_ONCE(rsp->signaled == RCU_GP_INIT);
         rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
         rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
@@ -865,6 +880,9 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
                 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
                 rcu_start_gp_per_cpu(rsp, rnp, rdp);
                 rcu_preempt_boost_start_gp(rnp);
+                trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
+                                            rnp->level, rnp->grplo,
+                                            rnp->grphi, rnp->qsmask);
                 raw_spin_unlock_irqrestore(&rnp->lock, flags);
                 return;
         }
@@ -901,6 +919,9 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
                 if (rnp == rdp->mynode)
                         rcu_start_gp_per_cpu(rsp, rnp, rdp);
                 rcu_preempt_boost_start_gp(rnp);
+                trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
+                                            rnp->level, rnp->grplo,
+                                            rnp->grphi, rnp->qsmask);
                 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
         }
 
@@ -922,6 +943,8 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
         __releases(rcu_get_root(rsp)->lock)
 {
         unsigned long gp_duration;
+        struct rcu_node *rnp = rcu_get_root(rsp);
+        struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
 
         WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
 
@@ -933,7 +956,41 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
         gp_duration = jiffies - rsp->gp_start;
         if (gp_duration > rsp->gp_max)
                 rsp->gp_max = gp_duration;
-        rsp->completed = rsp->gpnum;
+
+        /*
+         * We know the grace period is complete, but to everyone else
+         * it appears to still be ongoing.  But it is also the case
+         * that to everyone else it looks like there is nothing that
+         * they can do to advance the grace period.  It is therefore
+         * safe for us to drop the lock in order to mark the grace
+         * period as completed in all of the rcu_node structures.
+         *
+         * But if this CPU needs another grace period, it will take
+         * care of this while initializing the next grace period.
+         * We use RCU_WAIT_TAIL instead of the usual RCU_DONE_TAIL
+         * because the callbacks have not yet been advanced: Those
+         * callbacks are waiting on the grace period that just now
+         * completed.
+         */
+        if (*rdp->nxttail[RCU_WAIT_TAIL] == NULL) {
+                raw_spin_unlock(&rnp->lock);     /* irqs remain disabled. */
+
+                /*
+                 * Propagate new ->completed value to rcu_node structures
+                 * so that other CPUs don't have to wait until the start
+                 * of the next grace period to process their callbacks.
+                 */
+                rcu_for_each_node_breadth_first(rsp, rnp) {
+                        raw_spin_lock(&rnp->lock); /* irqs already disabled. */
+                        rnp->completed = rsp->gpnum;
+                        raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+                }
+                rnp = rcu_get_root(rsp);
+                raw_spin_lock(&rnp->lock); /* irqs already disabled. */
+        }
+
+        rsp->completed = rsp->gpnum;  /* Declare the grace period complete. */
+        trace_rcu_grace_period(rsp->name, rsp->completed, "end");
         rsp->signaled = RCU_GP_IDLE;
         rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
 }
@@ -962,6 +1019,10 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
                 return;
         }
         rnp->qsmask &= ~mask;
+        trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
+                                         mask, rnp->qsmask, rnp->level,
+                                         rnp->grplo, rnp->grphi,
+                                         !!rnp->gp_tasks);
         if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
 
                 /* Other bits still set at this level, so done. */
@@ -1000,7 +1061,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
  * based on quiescent states detected in an earlier grace period!
  */
 static void
-rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
+rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastgp)
 {
         unsigned long flags;
         unsigned long mask;
@@ -1008,17 +1069,15 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long las
 
         rnp = rdp->mynode;
         raw_spin_lock_irqsave(&rnp->lock, flags);
-        if (lastcomp != rnp->completed) {
+        if (lastgp != rnp->gpnum || rnp->completed == rnp->gpnum) {
 
                 /*
-                 * Someone beat us to it for this grace period, so leave.
-                 * The race with GP start is resolved by the fact that we
-                 * hold the leaf rcu_node lock, so that the per-CPU bits
-                 * cannot yet be initialized -- so we would simply find our
-                 * CPU's bit already cleared in rcu_report_qs_rnp() if this
-                 * race occurred.
+                 * The grace period in which this quiescent state was
+                 * recorded has ended, so don't report it upwards.
+                 * We will instead need a new quiescent state that lies
+                 * within the current grace period.
                  */
-                rdp->passed_quiesc = 0;  /* try again later! */
+                rdp->passed_quiesce = 0;        /* need qs for new gp. */
                 raw_spin_unlock_irqrestore(&rnp->lock, flags);
                 return;
         }
@@ -1062,14 +1121,14 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
          * Was there a quiescent state since the beginning of the grace
          * period? If no, then exit and wait for the next call.
          */
-        if (!rdp->passed_quiesc)
+        if (!rdp->passed_quiesce)
                 return;
 
         /*
          * Tell RCU we are done (but rcu_report_qs_rdp() will be the
         * judge of that).
          */
-        rcu_report_qs_rdp(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed);
+        rcu_report_qs_rdp(rdp->cpu, rsp, rdp, rdp->passed_quiesce_gpnum);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -1130,11 +1189,20 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
                 if (rnp->qsmaskinit != 0) {
                         if (rnp != rdp->mynode)
                                 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+                        else
+                                trace_rcu_grace_period(rsp->name,
+                                                       rnp->gpnum + 1 -
+                                                       !!(rnp->qsmask & mask),
+                                                       "cpuofl");
                         break;
                 }
-                if (rnp == rdp->mynode)
+                if (rnp == rdp->mynode) {
+                        trace_rcu_grace_period(rsp->name,
+                                               rnp->gpnum + 1 -
+                                               !!(rnp->qsmask & mask),
+                                               "cpuofl");
                         need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
-                else
+                } else
                         raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
                 mask = rnp->grpmask;
                 rnp = rnp->parent;
@@ -1190,17 +1258,22 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 {
         unsigned long flags;
         struct rcu_head *next, *list, **tail;
-        int count;
+        int bl, count;
 
         /* If no callbacks are ready, just return.*/
-        if (!cpu_has_callbacks_ready_to_invoke(rdp))
+        if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
+                trace_rcu_batch_start(rsp->name, 0, 0);
+                trace_rcu_batch_end(rsp->name, 0);
                 return;
+        }
 
         /*
          * Extract the list of ready callbacks, disabling to prevent
          * races with call_rcu() from interrupt handlers.
          */
         local_irq_save(flags);
+        bl = rdp->blimit;
+        trace_rcu_batch_start(rsp->name, rdp->qlen, bl);
         list = rdp->nxtlist;
         rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
         *rdp->nxttail[RCU_DONE_TAIL] = NULL;
@@ -1216,13 +1289,14 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
                 next = list->next;
                 prefetch(next);
                 debug_rcu_head_unqueue(list);
-                __rcu_reclaim(list);
+                __rcu_reclaim(rsp->name, list);
                 list = next;
-                if (++count >= rdp->blimit)
+                if (++count >= bl)
                         break;
         }
 
         local_irq_save(flags);
+        trace_rcu_batch_end(rsp->name, count);
 
         /* Update count, and requeue any remaining callbacks. */
         rdp->qlen -= count;
@@ -1250,7 +1324,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 
         local_irq_restore(flags);
 
-        /* Re-raise the RCU softirq if there are callbacks remaining. */
+        /* Re-invoke RCU core processing if there are callbacks remaining. */
         if (cpu_has_callbacks_ready_to_invoke(rdp))
                 invoke_rcu_core();
 }
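
Note that rcu_do_batch() now samples ->blimit once into the local bl while interrupts are still disabled, so the limit reported by trace_rcu_batch_start() is exactly the one the loop enforces, even if ->blimit is retuned concurrently (for example under qhimark pressure). The drain loop reduces to this hedged standalone sketch, with illustrative types rather than the kernel's:

        struct cb {
                struct cb *next;
                void (*func)(struct cb *);
        };

        /* Invoke up to bl ready callbacks; leave survivors on *listp. */
        static int drain_batch(struct cb **listp, int bl)
        {
                struct cb *list = *listp;
                int count = 0;

                while (list) {
                        struct cb *next = list->next;

                        list->func(list);
                        list = next;
                        if (++count >= bl)      /* compare against the snapshot */
                                break;
                }
                *listp = list;
                return count;
        }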
@@ -1258,7 +1332,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 /*
  * Check to see if this CPU is in a non-context-switch quiescent state
  * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
- * Also schedule the RCU softirq handler.
+ * Also schedule RCU core processing.
  *
  * This function must be called with hardirqs disabled.  It is normally
  * invoked from the scheduling-clock interrupt.  If rcu_pending returns
@@ -1266,6 +1340,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
  */
 void rcu_check_callbacks(int cpu, int user)
 {
+        trace_rcu_utilization("Start scheduler-tick");
         if (user ||
             (idle_cpu(cpu) && rcu_scheduler_active &&
              !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
@@ -1299,6 +1374,7 @@ void rcu_check_callbacks(int cpu, int user)
         rcu_preempt_check_callbacks(cpu);
         if (rcu_pending(cpu))
                 invoke_rcu_core();
+        trace_rcu_utilization("End scheduler-tick");
 }
 
 #ifdef CONFIG_SMP
@@ -1360,10 +1436,14 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
         unsigned long flags;
         struct rcu_node *rnp = rcu_get_root(rsp);
 
-        if (!rcu_gp_in_progress(rsp))
+        trace_rcu_utilization("Start fqs");
+        if (!rcu_gp_in_progress(rsp)) {
+                trace_rcu_utilization("End fqs");
                 return;  /* No grace period in progress, nothing to force. */
+        }
         if (!raw_spin_trylock_irqsave(&rsp->fqslock, flags)) {
                 rsp->n_force_qs_lh++; /* Inexact, can lose counts.  Tough! */
+                trace_rcu_utilization("End fqs");
                 return;  /* Someone else is already on the job. */
         }
         if (relaxed && ULONG_CMP_GE(rsp->jiffies_force_qs, jiffies))
@@ -1412,11 +1492,13 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
                 raw_spin_unlock(&rsp->fqslock); /* irqs remain disabled */
                 rsp->fqs_need_gp = 0;
                 rcu_start_gp(rsp, flags); /* releases rnp->lock */
+                trace_rcu_utilization("End fqs");
                 return;
         }
         raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
 unlock_fqs_ret:
         raw_spin_unlock_irqrestore(&rsp->fqslock, flags);
+        trace_rcu_utilization("End fqs");
 }
 
 #else /* #ifdef CONFIG_SMP */
@@ -1429,9 +1511,9 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 #endif /* #else #ifdef CONFIG_SMP */
 
 /*
- * This does the RCU processing work from softirq context for the
- * specified rcu_state and rcu_data structures.  This may be called
- * only from the CPU to whom the rdp belongs.
+ * This does the RCU core processing work for the specified rcu_state
+ * and rcu_data structures.  This may be called only from the CPU to
+ * whom the rdp belongs.
  */
 static void
 __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
@@ -1468,24 +1550,24 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 }
 
 /*
- * Do softirq processing for the current CPU.
+ * Do RCU core processing for the current CPU.
  */
 static void rcu_process_callbacks(struct softirq_action *unused)
 {
+        trace_rcu_utilization("Start RCU core");
         __rcu_process_callbacks(&rcu_sched_state,
                                 &__get_cpu_var(rcu_sched_data));
         __rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
         rcu_preempt_process_callbacks();
-
-        /* If we are last CPU on way to dyntick-idle mode, accelerate it. */
-        rcu_needs_cpu_flush();
+        trace_rcu_utilization("End RCU core");
 }
 
 /*
- * Wake up the current CPU's kthread.  This replaces raise_softirq()
- * in earlier versions of RCU.  Note that because we are running on
- * the current CPU with interrupts disabled, the rcu_cpu_kthread_task
- * cannot disappear out from under us.
+ * Schedule RCU callback invocation.  If the specified type of RCU
+ * does not support RCU priority boosting, just do a direct call,
+ * otherwise wake up the per-CPU kernel kthread.  Note that because we
+ * are running on the current CPU with interrupts disabled, the
+ * rcu_cpu_kthread_task cannot disappear out from under us.
 */
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 {
@@ -1530,6 +1612,12 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
         rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
         rdp->qlen++;
 
+        if (__is_kfree_rcu_offset((unsigned long)func))
+                trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
+                                         rdp->qlen);
+        else
+                trace_rcu_callback(rsp->name, head, rdp->qlen);
+
         /* If interrupts were disabled, don't dive into RCU core. */
         if (irqs_disabled_flags(flags)) {
                 local_irq_restore(flags);
@@ -1613,18 +1701,9 @@ EXPORT_SYMBOL_GPL(call_rcu_bh);
  */
 void synchronize_sched(void)
 {
-        struct rcu_synchronize rcu;
-
         if (rcu_blocking_is_gp())
                 return;
-
-        init_rcu_head_on_stack(&rcu.head);
-        init_completion(&rcu.completion);
-        /* Will wake me after RCU finished. */
-        call_rcu_sched(&rcu.head, wakeme_after_rcu);
-        /* Wait for it. */
-        wait_for_completion(&rcu.completion);
-        destroy_rcu_head_on_stack(&rcu.head);
+        wait_rcu_gp(call_rcu_sched);
 }
 EXPORT_SYMBOL_GPL(synchronize_sched);
 
@@ -1639,18 +1718,9 @@ EXPORT_SYMBOL_GPL(synchronize_sched);
  */
 void synchronize_rcu_bh(void)
 {
-        struct rcu_synchronize rcu;
-
         if (rcu_blocking_is_gp())
                 return;
-
-        init_rcu_head_on_stack(&rcu.head);
-        init_completion(&rcu.completion);
-        /* Will wake me after RCU finished. */
-        call_rcu_bh(&rcu.head, wakeme_after_rcu);
-        /* Wait for it. */
-        wait_for_completion(&rcu.completion);
-        destroy_rcu_head_on_stack(&rcu.head);
+        wait_rcu_gp(call_rcu_bh);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
 
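
The two hunks above replace identical open-coded wait loops with wait_rcu_gp(), which takes the flavor's call_rcu variant as a parameter. Reconstructed from the deleted lines, the helper plausibly looks like the sketch below (the real definition lives elsewhere in the RCU code; treat this as illustrative rather than exact):

        /* Wait for a grace period of the flavor whose call_rcu function is crf. */
        static void wait_rcu_gp(void (*crf)(struct rcu_head *head,
                                            void (*func)(struct rcu_head *head)))
        {
                struct rcu_synchronize rcu;

                init_rcu_head_on_stack(&rcu.head);
                init_completion(&rcu.completion);
                crf(&rcu.head, wakeme_after_rcu);       /* wakes us after the GP */
                wait_for_completion(&rcu.completion);
                destroy_rcu_head_on_stack(&rcu.head);
        }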
@@ -1671,7 +1741,8 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
         check_cpu_stall(rsp, rdp);
 
         /* Is the RCU core waiting for a quiescent state from this CPU? */
-        if (rdp->qs_pending && !rdp->passed_quiesc) {
+        if (rcu_scheduler_fully_active &&
+            rdp->qs_pending && !rdp->passed_quiesce) {
 
                 /*
                  * If force_quiescent_state() coming soon and this CPU
@@ -1683,7 +1754,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
                     ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1,
                                  jiffies))
                         set_need_resched();
-        } else if (rdp->qs_pending && rdp->passed_quiesc) {
+        } else if (rdp->qs_pending && rdp->passed_quiesce) {
                 rdp->n_rp_report_qs++;
                 return 1;
         }
@@ -1846,6 +1917,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
         rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
 #endif /* #ifdef CONFIG_NO_HZ */
         rdp->cpu = cpu;
+        rdp->rsp = rsp;
         raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
 
@@ -1865,8 +1937,6 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
 
         /* Set up local state, ensuring consistent view of global state. */
         raw_spin_lock_irqsave(&rnp->lock, flags);
-        rdp->passed_quiesc = 0;  /* We could be racing with new GP, */
-        rdp->qs_pending = 1;     /* so set up to respond to current GP. */
         rdp->beenonline = 1;     /* We have now been online. */
         rdp->preemptible = preemptible;
         rdp->qlen_last_fqs_check = 0;
@@ -1891,9 +1961,17 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
                 rnp->qsmaskinit |= mask;
                 mask = rnp->grpmask;
                 if (rnp == rdp->mynode) {
-                        rdp->gpnum = rnp->completed; /* if GP in progress... */
+                        /*
+                         * If there is a grace period in progress, we will
+                         * set up to wait for it next time we run the
+                         * RCU core code.
+                         */
+                        rdp->gpnum = rnp->completed;
                         rdp->completed = rnp->completed;
-                        rdp->passed_quiesc_completed = rnp->completed - 1;
+                        rdp->passed_quiesce = 0;
+                        rdp->qs_pending = 0;
+                        rdp->passed_quiesce_gpnum = rnp->gpnum - 1;
+                        trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuonl");
                 }
                 raw_spin_unlock(&rnp->lock); /* irqs already disabled. */
                 rnp = rnp->parent;
@@ -1919,6 +1997,7 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
         struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
         struct rcu_node *rnp = rdp->mynode;
 
+        trace_rcu_utilization("Start CPU hotplug");
         switch (action) {
         case CPU_UP_PREPARE:
         case CPU_UP_PREPARE_FROZEN:
@@ -1954,6 +2033,7 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
         default:
                 break;
         }
+        trace_rcu_utilization("End CPU hotplug");
         return NOTIFY_OK;
 }
 
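
Once this patch is applied, the new tracepoints can be observed through the standard ftrace event interface: assuming debugfs is mounted at /sys/kernel/debug, writing 1 to /sys/kernel/debug/tracing/events/rcu/enable turns on all rcu:* events, and reading /sys/kernel/debug/tracing/trace_pipe streams them. Individual events (rcu_grace_period, rcu_batch_start, and so on, matching the trace_rcu_* calls above) can be enabled selectively under the same directory.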