Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--	kernel/rcutree.c	390
1 file changed, 265 insertions(+), 125 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 52b06f6e158c..f3077c0ab181 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -49,13 +49,6 @@
 
 #include "rcutree.h"
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-static struct lock_class_key rcu_lock_key;
-struct lockdep_map rcu_lock_map =
-	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
-EXPORT_SYMBOL_GPL(rcu_lock_map);
-#endif
-
 /* Data structures. */
 
 #define RCU_STATE_INITIALIZER(name) { \
@@ -66,10 +59,13 @@ EXPORT_SYMBOL_GPL(rcu_lock_map);
 		NUM_RCU_LVL_2, \
 		NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \
 	}, \
-	.signaled = RCU_SIGNAL_INIT, \
+	.signaled = RCU_GP_IDLE, \
 	.gpnum = -300, \
 	.completed = -300, \
 	.onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \
+	.orphan_cbs_list = NULL, \
+	.orphan_cbs_tail = &name.orphan_cbs_list, \
+	.orphan_qlen = 0, \
 	.fqslock = __SPIN_LOCK_UNLOCKED(&name.fqslock), \
 	.n_force_qs = 0, \
 	.n_force_qs_ngp = 0, \
@@ -81,24 +77,16 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
 struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
-extern long rcu_batches_completed_sched(void);
-static struct rcu_node *rcu_get_root(struct rcu_state *rsp);
-static void cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp,
-			  struct rcu_node *rnp, unsigned long flags);
-static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags);
-#ifdef CONFIG_HOTPLUG_CPU
-static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp);
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-static void __rcu_process_callbacks(struct rcu_state *rsp,
-				    struct rcu_data *rdp);
-static void __call_rcu(struct rcu_head *head,
-		       void (*func)(struct rcu_head *rcu),
-		       struct rcu_state *rsp);
-static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp);
-static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_state *rsp,
-					   int preemptable);
 
-#include "rcutree_plugin.h"
+/*
+ * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s
+ * permit this function to be invoked without holding the root rcu_node
+ * structure's ->lock, but of course results can be subject to change.
+ */
+static int rcu_gp_in_progress(struct rcu_state *rsp)
+{
+	return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
+}
 
 /*
  * Note a quiescent state.  Because we do not need to know
@@ -137,6 +125,10 @@ static int blimit = 10; /* Maximum callbacks per softirq. */
 static int qhimark = 10000;	/* If this many pending, ignore blimit. */
 static int qlowmark = 100;	/* Once only this many pending, use blimit. */
 
+module_param(blimit, int, 0);
+module_param(qhimark, int, 0);
+module_param(qlowmark, int, 0);
+
 static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
 static int rcu_pending(int cpu);
 
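
Usage note (not part of the patch): since rcutree.c is normally built into the kernel, these module_param() declarations should surface as boot parameters prefixed with the object name, so something like "rcutree.blimit=20 rcutree.qhimark=20000 rcutree.qlowmark=50" on the kernel command line should tune the callback-batching limits without a rebuild. The "rcutree." prefix is assumed here from KBUILD_MODNAME rather than stated by this diff.
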
@@ -173,9 +165,7 @@ cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
 static int
 cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-	/* ACCESS_ONCE() because we are accessing outside of lock. */
-	return *rdp->nxttail[RCU_DONE_TAIL] &&
-	       ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum);
+	return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp);
 }
 
 /*
@@ -369,7 +359,7 @@ static long dyntick_recall_completed(struct rcu_state *rsp)
 /*
  * Snapshot the specified CPU's dynticks counter so that we can later
  * credit them with an implicit quiescent state.  Return 1 if this CPU
- * is already in a quiescent state courtesy of dynticks idle mode.
+ * is in dynticks idle mode, which is an extended quiescent state.
  */
 static int dyntick_save_progress_counter(struct rcu_data *rdp)
 {
@@ -475,30 +465,34 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 	long delta;
 	unsigned long flags;
 	struct rcu_node *rnp = rcu_get_root(rsp);
-	struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
-	struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES];
 
 	/* Only let one CPU complain about others per time interval. */
 
 	spin_lock_irqsave(&rnp->lock, flags);
 	delta = jiffies - rsp->jiffies_stall;
-	if (delta < RCU_STALL_RAT_DELAY || rsp->gpnum == rsp->completed) {
+	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
 		spin_unlock_irqrestore(&rnp->lock, flags);
 		return;
 	}
 	rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
+
+	/*
+	 * Now rat on any tasks that got kicked up to the root rcu_node
+	 * due to CPU offlining.
+	 */
+	rcu_print_task_stall(rnp);
 	spin_unlock_irqrestore(&rnp->lock, flags);
 
 	/* OK, time to rat on our buddy... */
 
 	printk(KERN_ERR "INFO: RCU detected CPU stalls:");
-	for (; rnp_cur < rnp_end; rnp_cur++) {
+	rcu_for_each_leaf_node(rsp, rnp) {
 		rcu_print_task_stall(rnp);
-		if (rnp_cur->qsmask == 0)
+		if (rnp->qsmask == 0)
 			continue;
-		for (cpu = 0; cpu <= rnp_cur->grphi - rnp_cur->grplo; cpu++)
-			if (rnp_cur->qsmask & (1UL << cpu))
-				printk(" %d", rnp_cur->grplo + cpu);
+		for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
+			if (rnp->qsmask & (1UL << cpu))
+				printk(" %d", rnp->grplo + cpu);
 	}
 	printk(" (detected by %d, t=%ld jiffies)\n",
 	       smp_processor_id(), (long)(jiffies - rsp->gp_start));
@@ -537,8 +531,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 		/* We haven't checked in, so go dump stack. */
 		print_cpu_stall(rsp);
 
-	} else if (rsp->gpnum != rsp->completed &&
-		   delta >= RCU_STALL_RAT_DELAY) {
+	} else if (rcu_gp_in_progress(rsp) && delta >= RCU_STALL_RAT_DELAY) {
 
 		/* They had two time units to dump stack, so complain. */
 		print_other_cpu_stall(rsp);
@@ -617,9 +610,15 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	note_new_gpnum(rsp, rdp);
 
 	/*
-	 * Because we are first, we know that all our callbacks will
-	 * be covered by this upcoming grace period, even the ones
-	 * that were registered arbitrarily recently.
+	 * Because this CPU just now started the new grace period, we know
+	 * that all of its callbacks will be covered by this upcoming grace
+	 * period, even the ones that were registered arbitrarily recently.
+	 * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL.
+	 *
+	 * Other CPUs cannot be sure exactly when the grace period started.
+	 * Therefore, their recently registered callbacks must pass through
+	 * an additional RCU_NEXT_READY stage, so that they will be handled
+	 * by the next RCU grace period.
 	 */
 	rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
 	rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
@@ -657,15 +656,18 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	 * one corresponding to this CPU, due to the fact that we have
 	 * irqs disabled.
 	 */
-	for (rnp = &rsp->node[0]; rnp < &rsp->node[NUM_RCU_NODES]; rnp++) {
+	rcu_for_each_node_breadth_first(rsp, rnp) {
 		spin_lock(&rnp->lock);	/* irqs already disabled. */
 		rcu_preempt_check_blocked_tasks(rnp);
 		rnp->qsmask = rnp->qsmaskinit;
 		rnp->gpnum = rsp->gpnum;
-		spin_unlock(&rnp->lock);	/* irqs already disabled. */
+		spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 	}
 
+	rnp = rcu_get_root(rsp);
+	spin_lock(&rnp->lock);		/* irqs already disabled. */
 	rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
+	spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 	spin_unlock_irqrestore(&rsp->onofflock, flags);
 }
 
@@ -703,10 +705,11 @@ rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
  * hold rnp->lock, as required by rcu_start_gp(), which will release it.
  */
 static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
-	__releases(rnp->lock)
+	__releases(rcu_get_root(rsp)->lock)
 {
-	WARN_ON_ONCE(rsp->completed == rsp->gpnum);
+	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
 	rsp->completed = rsp->gpnum;
+	rsp->signaled = RCU_GP_IDLE;
 	rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
 	rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
 }
@@ -842,17 +845,63 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
 #ifdef CONFIG_HOTPLUG_CPU
 
 /*
+ * Move a dying CPU's RCU callbacks to the ->orphan_cbs_list for the
+ * specified flavor of RCU.  The callbacks will be adopted by the next
+ * _rcu_barrier() invocation or by the CPU_DEAD notifier, whichever
+ * comes first.  Because this is invoked from the CPU_DYING notifier,
+ * irqs are already disabled.
+ */
+static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
+{
+	int i;
+	struct rcu_data *rdp = rsp->rda[smp_processor_id()];
+
+	if (rdp->nxtlist == NULL)
+		return;  /* irqs disabled, so comparison is stable. */
+	spin_lock(&rsp->onofflock);  /* irqs already disabled. */
+	*rsp->orphan_cbs_tail = rdp->nxtlist;
+	rsp->orphan_cbs_tail = rdp->nxttail[RCU_NEXT_TAIL];
+	rdp->nxtlist = NULL;
+	for (i = 0; i < RCU_NEXT_SIZE; i++)
+		rdp->nxttail[i] = &rdp->nxtlist;
+	rsp->orphan_qlen += rdp->qlen;
+	rdp->qlen = 0;
+	spin_unlock(&rsp->onofflock);  /* irqs remain disabled. */
+}
+
+/*
+ * Adopt previously orphaned RCU callbacks.
+ */
+static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
+{
+	unsigned long flags;
+	struct rcu_data *rdp;
+
+	spin_lock_irqsave(&rsp->onofflock, flags);
+	rdp = rsp->rda[smp_processor_id()];
+	if (rsp->orphan_cbs_list == NULL) {
+		spin_unlock_irqrestore(&rsp->onofflock, flags);
+		return;
+	}
+	*rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_list;
+	rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_tail;
+	rdp->qlen += rsp->orphan_qlen;
+	rsp->orphan_cbs_list = NULL;
+	rsp->orphan_cbs_tail = &rsp->orphan_cbs_list;
+	rsp->orphan_qlen = 0;
+	spin_unlock_irqrestore(&rsp->onofflock, flags);
+}
+
+/*
  * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy
  * and move all callbacks from the outgoing CPU to the current one.
  */
 static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 {
-	int i;
 	unsigned long flags;
 	long lastcomp;
 	unsigned long mask;
 	struct rcu_data *rdp = rsp->rda[cpu];
-	struct rcu_data *rdp_me;
 	struct rcu_node *rnp;
 
 	/* Exclude any attempts to start a new grace period. */
@@ -868,39 +917,29 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 			spin_unlock(&rnp->lock); /* irqs remain disabled. */
 			break;
 		}
-		rcu_preempt_offline_tasks(rsp, rnp, rdp);
+
+		/*
+		 * If there was a task blocking the current grace period,
+		 * and if all CPUs have checked in, we need to propagate
+		 * the quiescent state up the rcu_node hierarchy.  But that
+		 * is inconvenient at the moment due to deadlock issues if
+		 * this should end the current grace period.  So set the
+		 * offlined CPU's bit in ->qsmask in order to force the
+		 * next force_quiescent_state() invocation to clean up this
+		 * mess in a deadlock-free manner.
+		 */
+		if (rcu_preempt_offline_tasks(rsp, rnp, rdp) && !rnp->qsmask)
+			rnp->qsmask |= mask;
+
 		mask = rnp->grpmask;
 		spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 		rnp = rnp->parent;
 	} while (rnp != NULL);
 	lastcomp = rsp->completed;
 
-	spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
+	spin_unlock_irqrestore(&rsp->onofflock, flags);
 
-	/*
-	 * Move callbacks from the outgoing CPU to the running CPU.
-	 * Note that the outgoing CPU is now quiscent, so it is now
-	 * (uncharacteristically) safe to access its rcu_data structure.
-	 * Note also that we must carefully retain the order of the
-	 * outgoing CPU's callbacks in order for rcu_barrier() to work
-	 * correctly.  Finally, note that we start all the callbacks
-	 * afresh, even those that have passed through a grace period
-	 * and are therefore ready to invoke.  The theory is that hotplug
-	 * events are rare, and that if they are frequent enough to
-	 * indefinitely delay callbacks, you have far worse things to
-	 * be worrying about.
-	 */
-	rdp_me = rsp->rda[smp_processor_id()];
-	if (rdp->nxtlist != NULL) {
-		*rdp_me->nxttail[RCU_NEXT_TAIL] = rdp->nxtlist;
-		rdp_me->nxttail[RCU_NEXT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
-		rdp->nxtlist = NULL;
-		for (i = 0; i < RCU_NEXT_SIZE; i++)
-			rdp->nxttail[i] = &rdp->nxtlist;
-		rdp_me->qlen += rdp->qlen;
-		rdp->qlen = 0;
-	}
-	local_irq_restore(flags);
+	rcu_adopt_orphan_cbs(rsp);
 }
 
 /*
@@ -918,6 +957,14 @@ static void rcu_offline_cpu(int cpu)
 
 #else /* #ifdef CONFIG_HOTPLUG_CPU */
 
+static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
+{
+}
+
+static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
+{
+}
+
 static void rcu_offline_cpu(int cpu)
 {
 }
@@ -928,7 +975,7 @@ static void rcu_offline_cpu(int cpu)
  * Invoke any RCU callbacks that have made it to the end of their grace
  * period.  Thottle as specified by rdp->blimit.
  */
-static void rcu_do_batch(struct rcu_data *rdp)
+static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 {
 	unsigned long flags;
 	struct rcu_head *next, *list, **tail;
@@ -981,6 +1028,13 @@ static void rcu_do_batch(struct rcu_data *rdp)
 	if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
 		rdp->blimit = blimit;
 
+	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
+	if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
+		rdp->qlen_last_fqs_check = 0;
+		rdp->n_force_qs_snap = rsp->n_force_qs;
+	} else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
+		rdp->qlen_last_fqs_check = rdp->qlen;
+
 	local_irq_restore(flags);
 
 	/* Re-raise the RCU softirq if there are callbacks remaining. */
@@ -1050,33 +1104,32 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
 	int cpu;
 	unsigned long flags;
 	unsigned long mask;
-	struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
-	struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES];
+	struct rcu_node *rnp;
 
-	for (; rnp_cur < rnp_end; rnp_cur++) {
+	rcu_for_each_leaf_node(rsp, rnp) {
 		mask = 0;
-		spin_lock_irqsave(&rnp_cur->lock, flags);
+		spin_lock_irqsave(&rnp->lock, flags);
 		if (rsp->completed != lastcomp) {
-			spin_unlock_irqrestore(&rnp_cur->lock, flags);
+			spin_unlock_irqrestore(&rnp->lock, flags);
 			return 1;
 		}
-		if (rnp_cur->qsmask == 0) {
-			spin_unlock_irqrestore(&rnp_cur->lock, flags);
+		if (rnp->qsmask == 0) {
+			spin_unlock_irqrestore(&rnp->lock, flags);
 			continue;
 		}
-		cpu = rnp_cur->grplo;
+		cpu = rnp->grplo;
 		bit = 1;
-		for (; cpu <= rnp_cur->grphi; cpu++, bit <<= 1) {
-			if ((rnp_cur->qsmask & bit) != 0 && f(rsp->rda[cpu]))
+		for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
+			if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu]))
 				mask |= bit;
 		}
 		if (mask != 0 && rsp->completed == lastcomp) {
 
-			/* cpu_quiet_msk() releases rnp_cur->lock. */
-			cpu_quiet_msk(mask, rsp, rnp_cur, flags);
+			/* cpu_quiet_msk() releases rnp->lock. */
+			cpu_quiet_msk(mask, rsp, rnp, flags);
 			continue;
 		}
-		spin_unlock_irqrestore(&rnp_cur->lock, flags);
+		spin_unlock_irqrestore(&rnp->lock, flags);
 	}
 	return 0;
 }
@@ -1092,7 +1145,7 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 	struct rcu_node *rnp = rcu_get_root(rsp);
 	u8 signaled;
 
-	if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum))
+	if (!rcu_gp_in_progress(rsp))
 		return;  /* No grace period in progress, nothing to force. */
 	if (!spin_trylock_irqsave(&rsp->fqslock, flags)) {
 		rsp->n_force_qs_lh++; /* Inexact, can lose counts.  Tough! */
@@ -1113,9 +1166,10 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 	}
 	spin_unlock(&rnp->lock);
 	switch (signaled) {
+	case RCU_GP_IDLE:
 	case RCU_GP_INIT:
 
-		break; /* grace period still initializing, ignore. */
+		break; /* grace period idle or initializing, ignore. */
 
 	case RCU_SAVE_DYNTICK:
 
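
For orientation, here is a sketch of the ->signaled values this switch now walks, paraphrased from the rcutree.h definitions of the same series (not part of this diff, so treat the wording as an approximation):

	/*
	 * RCU_GP_IDLE      - no grace period in progress; force_quiescent_state()
	 *                    has nothing to do (the case newly added above).
	 * RCU_GP_INIT      - rcu_start_gp() is still initializing the rcu_node tree.
	 * RCU_SAVE_DYNTICK - snapshot each holdout CPU's dynticks counter.
	 * RCU_FORCE_QS     - recheck dynticks/offline state and prod holdout CPUs.
	 *
	 * cpu_quiet_msk_finish() now drops ->signaled back to RCU_GP_IDLE when a
	 * grace period ends, and rcu_start_gp() advances it again for the next one.
	 */
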
@@ -1129,7 +1183,8 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 
 		/* Update state, record completion counter. */
 		spin_lock(&rnp->lock);
-		if (lastcomp == rsp->completed) {
+		if (lastcomp == rsp->completed &&
+		    rsp->signaled == RCU_SAVE_DYNTICK) {
 			rsp->signaled = RCU_FORCE_QS;
 			dyntick_record_completed(rsp, lastcomp);
 		}
@@ -1195,7 +1250,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 	}
 
 	/* If there are callbacks ready, invoke them. */
-	rcu_do_batch(rdp);
+	rcu_do_batch(rsp, rdp);
 }
 
 /*
@@ -1251,7 +1306,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
 
 	/* Start a new grace period if one not already started. */
-	if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum)) {
+	if (!rcu_gp_in_progress(rsp)) {
 		unsigned long nestflag;
 		struct rcu_node *rnp_root = rcu_get_root(rsp);
 
@@ -1259,10 +1314,20 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 		rcu_start_gp(rsp, nestflag);  /* releases rnp_root->lock. */
 	}
 
-	/* Force the grace period if too many callbacks or too long waiting. */
-	if (unlikely(++rdp->qlen > qhimark)) {
+	/*
+	 * Force the grace period if too many callbacks or too long waiting.
+	 * Enforce hysteresis, and don't invoke force_quiescent_state()
+	 * if some other CPU has recently done so.  Also, don't bother
+	 * invoking force_quiescent_state() if the newly enqueued callback
+	 * is the only one waiting for a grace period to complete.
+	 */
+	if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
 		rdp->blimit = LONG_MAX;
-		force_quiescent_state(rsp, 0);
+		if (rsp->n_force_qs == rdp->n_force_qs_snap &&
+		    *rdp->nxttail[RCU_DONE_TAIL] != head)
+			force_quiescent_state(rsp, 0);
+		rdp->n_force_qs_snap = rsp->n_force_qs;
+		rdp->qlen_last_fqs_check = rdp->qlen;
 	} else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
 		force_quiescent_state(rsp, 1);
 	local_irq_restore(flags);
@@ -1331,7 +1396,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 	}
 
 	/* Has an RCU GP gone long enough to send resched IPIs &c? */
-	if (ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum) &&
+	if (rcu_gp_in_progress(rsp) &&
 	    ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)) {
 		rdp->n_rp_need_fqs++;
 		return 1;
@@ -1368,6 +1433,82 @@ int rcu_needs_cpu(int cpu)
 	       rcu_preempt_needs_cpu(cpu);
 }
 
+static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
+static atomic_t rcu_barrier_cpu_count;
+static DEFINE_MUTEX(rcu_barrier_mutex);
+static struct completion rcu_barrier_completion;
+
+static void rcu_barrier_callback(struct rcu_head *notused)
+{
+	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+		complete(&rcu_barrier_completion);
+}
+
+/*
+ * Called with preemption disabled, and from cross-cpu IRQ context.
+ */
+static void rcu_barrier_func(void *type)
+{
+	int cpu = smp_processor_id();
+	struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
+	void (*call_rcu_func)(struct rcu_head *head,
+			      void (*func)(struct rcu_head *head));
+
+	atomic_inc(&rcu_barrier_cpu_count);
+	call_rcu_func = type;
+	call_rcu_func(head, rcu_barrier_callback);
+}
+
+/*
+ * Orchestrate the specified type of RCU barrier, waiting for all
+ * RCU callbacks of the specified type to complete.
+ */
+static void _rcu_barrier(struct rcu_state *rsp,
+			 void (*call_rcu_func)(struct rcu_head *head,
+					       void (*func)(struct rcu_head *head)))
+{
+	BUG_ON(in_interrupt());
+	/* Take mutex to serialize concurrent rcu_barrier() requests. */
+	mutex_lock(&rcu_barrier_mutex);
+	init_completion(&rcu_barrier_completion);
+	/*
+	 * Initialize rcu_barrier_cpu_count to 1, then invoke
+	 * rcu_barrier_func() on each CPU, so that each CPU also has
+	 * incremented rcu_barrier_cpu_count.  Only then is it safe to
+	 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
+	 * might complete its grace period before all of the other CPUs
+	 * did their increment, causing this function to return too
+	 * early.
+	 */
+	atomic_set(&rcu_barrier_cpu_count, 1);
+	preempt_disable(); /* stop CPU_DYING from filling orphan_cbs_list */
+	rcu_adopt_orphan_cbs(rsp);
+	on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1);
+	preempt_enable(); /* CPU_DYING can again fill orphan_cbs_list */
+	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+		complete(&rcu_barrier_completion);
+	wait_for_completion(&rcu_barrier_completion);
+	mutex_unlock(&rcu_barrier_mutex);
+}
+
+/**
+ * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
+ */
+void rcu_barrier_bh(void)
+{
+	_rcu_barrier(&rcu_bh_state, call_rcu_bh);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier_bh);
+
+/**
+ * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
+ */
+void rcu_barrier_sched(void)
+{
+	_rcu_barrier(&rcu_sched_state, call_rcu_sched);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier_sched);
+
 /*
  * Do boot-time initialization of a CPU's per-CPU RCU data.
  */
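
The exported barrier primitives above are intended to be called before tearing down anything that still-pending callbacks might reference. A minimal hypothetical caller might look like the following sketch (mydrv_unpublish() and mydrv_cache are invented names for illustration, not part of this patch):

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	static void mydrv_teardown(void)
	{
		mydrv_unpublish();	/* stop queueing new call_rcu_sched() callbacks */
		rcu_barrier_sched();	/* wait for every already-queued callback to run */
		kmem_cache_destroy(mydrv_cache);	/* now safe: no callback can touch it */
	}
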
@@ -1418,6 +1559,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
 	rdp->beenonline = 1;	 /* We have now been online. */
 	rdp->preemptable = preemptable;
 	rdp->passed_quiesc_completed = lastcomp - 1;
+	rdp->qlen_last_fqs_check = 0;
+	rdp->n_force_qs_snap = rsp->n_force_qs;
 	rdp->blimit = blimit;
 	spin_unlock(&rnp->lock);		/* irqs remain disabled. */
 
@@ -1464,6 +1607,22 @@ int __cpuinit rcu_cpu_notify(struct notifier_block *self,
 	case CPU_UP_PREPARE_FROZEN:
 		rcu_online_cpu(cpu);
 		break;
+	case CPU_DYING:
+	case CPU_DYING_FROZEN:
+		/*
+		 * preempt_disable() in _rcu_barrier() prevents stop_machine(),
+		 * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
+		 * returns, all online cpus have queued rcu_barrier_func().
+		 * The dying CPU clears its cpu_online_mask bit and
+		 * moves all of its RCU callbacks to ->orphan_cbs_list
+		 * in the context of stop_machine(), so subsequent calls
+		 * to _rcu_barrier() will adopt these callbacks and only
+		 * then queue rcu_barrier_func() on all remaining CPUs.
+		 */
+		rcu_send_cbs_to_orphanage(&rcu_bh_state);
+		rcu_send_cbs_to_orphanage(&rcu_sched_state);
+		rcu_preempt_send_cbs_to_orphanage();
+		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 	case CPU_UP_CANCELED:
@@ -1526,7 +1685,8 @@ static void __init rcu_init_one(struct rcu_state *rsp)
 		cpustride *= rsp->levelspread[i];
 		rnp = rsp->level[i];
 		for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
-			spin_lock_init(&rnp->lock);
+			if (rnp != rcu_get_root(rsp))
+				spin_lock_init(&rnp->lock);
 			rnp->gpnum = 0;
 			rnp->qsmask = 0;
 			rnp->qsmaskinit = 0;
@@ -1549,6 +1709,7 @@ static void __init rcu_init_one(struct rcu_state *rsp)
 			INIT_LIST_HEAD(&rnp->blocked_tasks[1]);
 		}
 	}
+	spin_lock_init(&rcu_get_root(rsp)->lock);
 }
 
 /*
@@ -1558,6 +1719,10 @@ static void __init rcu_init_one(struct rcu_state *rsp)
  */
 #define RCU_INIT_FLAVOR(rsp, rcu_data) \
 do { \
+	int i; \
+	int j; \
+	struct rcu_node *rnp; \
+	\
 	rcu_init_one(rsp); \
 	rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \
 	j = 0; \
@@ -1570,31 +1735,8 @@ do { \
 	} \
 } while (0)
 
-#ifdef CONFIG_TREE_PREEMPT_RCU
-
-void __init __rcu_init_preempt(void)
-{
-	int i;			/* All used by RCU_INIT_FLAVOR(). */
-	int j;
-	struct rcu_node *rnp;
-
-	RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data);
-}
-
-#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-
-void __init __rcu_init_preempt(void)
-{
-}
-
-#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
-
 void __init __rcu_init(void)
 {
-	int i;			/* All used by RCU_INIT_FLAVOR(). */
-	int j;
-	struct rcu_node *rnp;
-
 	rcu_bootup_announce();
 #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
 	printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
@@ -1605,6 +1747,4 @@ void __init __rcu_init(void)
 	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 }
 
-module_param(blimit, int, 0);
-module_param(qhimark, int, 0);
-module_param(qlowmark, int, 0);
+#include "rcutree_plugin.h"
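
As a closing illustration of the reference-counting trick that _rcu_barrier()'s comment describes (start the count at 1 so the waiter cannot observe zero until every per-CPU callback has taken and then dropped its reference), here is a self-contained userspace analogy using POSIX threads. It is not kernel code, and all names in it are invented for the sketch:

	/* build: cc -pthread barrier_demo.c */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	#define NWORKERS 4

	static atomic_int count;
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t done = PTHREAD_COND_INITIALIZER;

	static void put_ref(void)
	{
		if (atomic_fetch_sub(&count, 1) == 1) {	/* dropped the last reference */
			pthread_mutex_lock(&lock);
			pthread_cond_signal(&done);
			pthread_mutex_unlock(&lock);
		}
	}

	static void *worker(void *arg)
	{
		(void)arg;
		/* stand-in for the per-CPU rcu_barrier_callback() work */
		put_ref();
		return NULL;
	}

	int main(void)
	{
		pthread_t tid[NWORKERS];
		int i;

		atomic_store(&count, 1);		/* the initiator's own reference */
		for (i = 0; i < NWORKERS; i++) {
			atomic_fetch_add(&count, 1);	/* taken before the worker can drop it */
			pthread_create(&tid[i], NULL, worker, NULL);
		}
		put_ref();				/* drop the initiator's reference */

		pthread_mutex_lock(&lock);		/* wait until every reference is gone */
		while (atomic_load(&count) != 0)
			pthread_cond_wait(&done, &lock);
		pthread_mutex_unlock(&lock);

		for (i = 0; i < NWORKERS; i++)
			pthread_join(tid[i], NULL);
		printf("all workers finished\n");
		return 0;
	}

Because the counter starts at 1, the initiator's final put_ref() is the only decrement that can reach zero before every worker has registered its increment, which mirrors why _rcu_barrier() would otherwise risk returning early.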