author     Takashi Iwai <tiwai@suse.de>    2009-11-14 08:38:28 -0500
committer  Takashi Iwai <tiwai@suse.de>    2009-11-14 08:38:28 -0500
commit     0c3c35e148dbc03106038dd25816fb9f3a084d86 (patch)
tree       8b8cc6a027353a0f242f61362b35b0942da61b83 /kernel/rcutree.c
parent     50d40f187f9182ee8caa1b83f80a0e11e2226baa (diff)
parent     5e08fe570c2dbabb5015c37049eb9a451e55c890 (diff)

Merge branch 'fix/misc' into topic/misc
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--   kernel/rcutree.c   374
1 file changed, 254 insertions(+), 120 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 52b06f6e158..0536125b049 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -49,13 +49,6 @@
 
 #include "rcutree.h"
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-static struct lock_class_key rcu_lock_key;
-struct lockdep_map rcu_lock_map =
-	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
-EXPORT_SYMBOL_GPL(rcu_lock_map);
-#endif
-
 /* Data structures. */
 
 #define RCU_STATE_INITIALIZER(name) { \
@@ -70,6 +63,9 @@ EXPORT_SYMBOL_GPL(rcu_lock_map);
 	.gpnum = -300, \
 	.completed = -300, \
 	.onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \
+	.orphan_cbs_list = NULL, \
+	.orphan_cbs_tail = &name.orphan_cbs_list, \
+	.orphan_qlen = 0, \
 	.fqslock = __SPIN_LOCK_UNLOCKED(&name.fqslock), \
 	.n_force_qs = 0, \
 	.n_force_qs_ngp = 0, \
@@ -81,24 +77,16 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
 struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
-extern long rcu_batches_completed_sched(void);
-static struct rcu_node *rcu_get_root(struct rcu_state *rsp);
-static void cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp,
-			  struct rcu_node *rnp, unsigned long flags);
-static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags);
-#ifdef CONFIG_HOTPLUG_CPU
-static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp);
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-static void __rcu_process_callbacks(struct rcu_state *rsp,
-				    struct rcu_data *rdp);
-static void __call_rcu(struct rcu_head *head,
-		       void (*func)(struct rcu_head *rcu),
-		       struct rcu_state *rsp);
-static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp);
-static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_state *rsp,
-					   int preemptable);
 
-#include "rcutree_plugin.h"
+/*
+ * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s
+ * permit this function to be invoked without holding the root rcu_node
+ * structure's ->lock, but of course results can be subject to change.
+ */
+static int rcu_gp_in_progress(struct rcu_state *rsp)
+{
+	return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
+}
 
 /*
  * Note a quiescent state.  Because we do not need to know
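The helper above replaces open-coded comparisons of ->completed against ->gpnum throughout the file. As a rough user-space illustration (not kernel code; the struct and values below are only stand-ins for the rcu_state fields), a grace period is in progress whenever the number of the most recently started grace period has run ahead of the number of the most recently completed one:

#include <stdio.h>

/* Stand-in for the two rcu_state counters compared by rcu_gp_in_progress(). */
struct gp_counters {
	long gpnum;		/* most recently started grace period */
	long completed;		/* most recently completed grace period */
};

static int gp_in_progress(const struct gp_counters *c)
{
	return c->completed != c->gpnum;
}

int main(void)
{
	struct gp_counters c = { .gpnum = -300, .completed = -300 }; /* seeds as in RCU_STATE_INITIALIZER */

	printf("%d\n", gp_in_progress(&c));	/* 0: no grace period yet */
	c.gpnum++;				/* a grace period starts  */
	printf("%d\n", gp_in_progress(&c));	/* 1: in progress         */
	c.completed = c.gpnum;			/* the grace period ends  */
	printf("%d\n", gp_in_progress(&c));	/* 0: idle again          */
	return 0;
}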
@@ -137,6 +125,10 @@ static int blimit = 10;	/* Maximum callbacks per softirq. */
 static int qhimark = 10000;	/* If this many pending, ignore blimit. */
 static int qlowmark = 100;	/* Once only this many pending, use blimit. */
 
+module_param(blimit, int, 0);
+module_param(qhimark, int, 0);
+module_param(qlowmark, int, 0);
+
 static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
 static int rcu_pending(int cpu);
 
@@ -173,9 +165,7 @@ cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
 static int
 cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-	/* ACCESS_ONCE() because we are accessing outside of lock. */
-	return *rdp->nxttail[RCU_DONE_TAIL] &&
-	       ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum);
+	return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp);
 }
 
 /*
@@ -369,7 +359,7 @@ static long dyntick_recall_completed(struct rcu_state *rsp)
 /*
  * Snapshot the specified CPU's dynticks counter so that we can later
  * credit them with an implicit quiescent state. Return 1 if this CPU
- * is already in a quiescent state courtesy of dynticks idle mode.
+ * is in dynticks idle mode, which is an extended quiescent state.
  */
 static int dyntick_save_progress_counter(struct rcu_data *rdp)
 {
@@ -475,30 +465,34 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 	long delta;
 	unsigned long flags;
 	struct rcu_node *rnp = rcu_get_root(rsp);
-	struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
-	struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES];
 
 	/* Only let one CPU complain about others per time interval. */
 
 	spin_lock_irqsave(&rnp->lock, flags);
 	delta = jiffies - rsp->jiffies_stall;
-	if (delta < RCU_STALL_RAT_DELAY || rsp->gpnum == rsp->completed) {
+	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
 		spin_unlock_irqrestore(&rnp->lock, flags);
 		return;
 	}
 	rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
+
+	/*
+	 * Now rat on any tasks that got kicked up to the root rcu_node
+	 * due to CPU offlining.
+	 */
+	rcu_print_task_stall(rnp);
 	spin_unlock_irqrestore(&rnp->lock, flags);
 
 	/* OK, time to rat on our buddy... */
 
 	printk(KERN_ERR "INFO: RCU detected CPU stalls:");
-	for (; rnp_cur < rnp_end; rnp_cur++) {
+	rcu_for_each_leaf_node(rsp, rnp) {
 		rcu_print_task_stall(rnp);
-		if (rnp_cur->qsmask == 0)
+		if (rnp->qsmask == 0)
 			continue;
-		for (cpu = 0; cpu <= rnp_cur->grphi - rnp_cur->grplo; cpu++)
-			if (rnp_cur->qsmask & (1UL << cpu))
-				printk(" %d", rnp_cur->grplo + cpu);
+		for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
+			if (rnp->qsmask & (1UL << cpu))
+				printk(" %d", rnp->grplo + cpu);
 	}
 	printk(" (detected by %d, t=%ld jiffies)\n",
 	       smp_processor_id(), (long)(jiffies - rsp->gp_start));
@@ -537,8 +531,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 		/* We haven't checked in, so go dump stack. */
 		print_cpu_stall(rsp);
 
-	} else if (rsp->gpnum != rsp->completed &&
-		   delta >= RCU_STALL_RAT_DELAY) {
+	} else if (rcu_gp_in_progress(rsp) && delta >= RCU_STALL_RAT_DELAY) {
 
 		/* They had two time units to dump stack, so complain. */
 		print_other_cpu_stall(rsp);
@@ -617,9 +610,15 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	note_new_gpnum(rsp, rdp);
 
 	/*
-	 * Because we are first, we know that all our callbacks will
-	 * be covered by this upcoming grace period, even the ones
-	 * that were registered arbitrarily recently.
+	 * Because this CPU just now started the new grace period, we know
+	 * that all of its callbacks will be covered by this upcoming grace
+	 * period, even the ones that were registered arbitrarily recently.
+	 * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL.
+	 *
+	 * Other CPUs cannot be sure exactly when the grace period started.
+	 * Therefore, their recently registered callbacks must pass through
+	 * an additional RCU_NEXT_READY stage, so that they will be handled
+	 * by the next RCU grace period.
 	 */
 	rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
 	rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
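The RCU_*_TAIL names above refer to the per-CPU segmented callback list: one singly linked list plus an array of tail pointers, one per segment. A minimal user-space sketch of that structure (illustrative only; the names and types below are simplified stand-ins for struct rcu_data's ->nxtlist/->nxttail[]) shows why "advancing" callbacks into a later segment is just a matter of copying tail pointers:

#include <stdio.h>

enum { DONE_TAIL, WAIT_TAIL, NEXT_READY_TAIL, NEXT_TAIL, NSEGS };

struct cb { int id; struct cb *next; };

struct cblist {
	struct cb *head;
	struct cb **tail[NSEGS];	/* each points at a ->next slot (or at head) */
};

static void cblist_init(struct cblist *l)
{
	int i;

	l->head = NULL;
	for (i = 0; i < NSEGS; i++)
		l->tail[i] = &l->head;
}

static void cblist_enqueue(struct cblist *l, struct cb *cb)
{
	cb->next = NULL;
	*l->tail[NEXT_TAIL] = cb;	/* append after the current last element */
	l->tail[NEXT_TAIL] = &cb->next;
}

int main(void)
{
	struct cblist l;
	struct cb a = { .id = 1 }, b = { .id = 2 };

	cblist_init(&l);
	cblist_enqueue(&l, &a);
	cblist_enqueue(&l, &b);

	/* This CPU starts the grace period: everything queued so far is covered. */
	l.tail[NEXT_READY_TAIL] = l.tail[NEXT_TAIL];
	l.tail[WAIT_TAIL] = l.tail[NEXT_TAIL];

	printf("WAIT and NEXT segments now end at the same place: %d\n",
	       l.tail[WAIT_TAIL] == l.tail[NEXT_TAIL]);
	return 0;
}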
@@ -657,7 +656,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	 * one corresponding to this CPU, due to the fact that we have
 	 * irqs disabled.
 	 */
-	for (rnp = &rsp->node[0]; rnp < &rsp->node[NUM_RCU_NODES]; rnp++) {
+	rcu_for_each_node_breadth_first(rsp, rnp) {
 		spin_lock(&rnp->lock);	/* irqs already disabled. */
 		rcu_preempt_check_blocked_tasks(rnp);
 		rnp->qsmask = rnp->qsmaskinit;
@@ -703,9 +702,9 @@ rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
  * hold rnp->lock, as required by rcu_start_gp(), which will release it.
  */
 static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
-	__releases(rnp->lock)
+	__releases(rcu_get_root(rsp)->lock)
 {
-	WARN_ON_ONCE(rsp->completed == rsp->gpnum);
+	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
 	rsp->completed = rsp->gpnum;
 	rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
 	rcu_start_gp(rsp, flags);  /* releases root node's rnp->lock. */
@@ -842,17 +841,63 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
 #ifdef CONFIG_HOTPLUG_CPU
 
 /*
+ * Move a dying CPU's RCU callbacks to the ->orphan_cbs_list for the
+ * specified flavor of RCU.  The callbacks will be adopted by the next
+ * _rcu_barrier() invocation or by the CPU_DEAD notifier, whichever
+ * comes first.  Because this is invoked from the CPU_DYING notifier,
+ * irqs are already disabled.
+ */
+static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
+{
+	int i;
+	struct rcu_data *rdp = rsp->rda[smp_processor_id()];
+
+	if (rdp->nxtlist == NULL)
+		return;  /* irqs disabled, so comparison is stable. */
+	spin_lock(&rsp->onofflock);  /* irqs already disabled. */
+	*rsp->orphan_cbs_tail = rdp->nxtlist;
+	rsp->orphan_cbs_tail = rdp->nxttail[RCU_NEXT_TAIL];
+	rdp->nxtlist = NULL;
+	for (i = 0; i < RCU_NEXT_SIZE; i++)
+		rdp->nxttail[i] = &rdp->nxtlist;
+	rsp->orphan_qlen += rdp->qlen;
+	rdp->qlen = 0;
+	spin_unlock(&rsp->onofflock);  /* irqs remain disabled. */
+}
+
+/*
+ * Adopt previously orphaned RCU callbacks.
+ */
+static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
+{
+	unsigned long flags;
+	struct rcu_data *rdp;
+
+	spin_lock_irqsave(&rsp->onofflock, flags);
+	rdp = rsp->rda[smp_processor_id()];
+	if (rsp->orphan_cbs_list == NULL) {
+		spin_unlock_irqrestore(&rsp->onofflock, flags);
+		return;
+	}
+	*rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_list;
+	rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_tail;
+	rdp->qlen += rsp->orphan_qlen;
+	rsp->orphan_cbs_list = NULL;
+	rsp->orphan_cbs_tail = &rsp->orphan_cbs_list;
+	rsp->orphan_qlen = 0;
+	spin_unlock_irqrestore(&rsp->onofflock, flags);
+}
+
+/*
  * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy
  * and move all callbacks from the outgoing CPU to the current one.
  */
 static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 {
-	int i;
 	unsigned long flags;
 	long lastcomp;
 	unsigned long mask;
 	struct rcu_data *rdp = rsp->rda[cpu];
-	struct rcu_data *rdp_me;
 	struct rcu_node *rnp;
 
 	/* Exclude any attempts to start a new grace period. */
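The two new functions rely on the usual head-plus-tail-pointer list idiom, so donating a whole callback list costs O(1) and preserves order (which rcu_barrier() depends on). A stand-alone user-space sketch of that splice (illustrative only; plain C, no locking, names are not the kernel's):

#include <stdio.h>

struct cb { int id; struct cb *next; };

struct cblist {
	struct cb *head;
	struct cb **tail;	/* points at head, or at the last element's ->next */
};

static void cblist_init(struct cblist *l)
{
	l->head = NULL;
	l->tail = &l->head;
}

static void cblist_add(struct cblist *l, struct cb *cb)
{
	cb->next = NULL;
	*l->tail = cb;
	l->tail = &cb->next;
}

/* Move every callback from @from to the end of @to, preserving order. */
static void cblist_splice(struct cblist *to, struct cblist *from)
{
	if (from->head == NULL)
		return;
	*to->tail = from->head;
	to->tail = from->tail;
	cblist_init(from);	/* the donor ends up empty */
}

int main(void)
{
	struct cblist cpu_list, orphanage;
	struct cb a = { .id = 1 }, b = { .id = 2 };
	struct cb *p;

	cblist_init(&cpu_list);
	cblist_init(&orphanage);
	cblist_add(&cpu_list, &a);
	cblist_add(&cpu_list, &b);

	cblist_splice(&orphanage, &cpu_list);	/* "send cbs to orphanage" */
	for (p = orphanage.head; p != NULL; p = p->next)
		printf("orphaned cb %d\n", p->id);
	return 0;
}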
@@ -868,39 +913,29 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 			spin_unlock(&rnp->lock); /* irqs remain disabled. */
 			break;
 		}
-		rcu_preempt_offline_tasks(rsp, rnp, rdp);
+
+		/*
+		 * If there was a task blocking the current grace period,
+		 * and if all CPUs have checked in, we need to propagate
+		 * the quiescent state up the rcu_node hierarchy.  But that
+		 * is inconvenient at the moment due to deadlock issues if
+		 * this should end the current grace period.  So set the
+		 * offlined CPU's bit in ->qsmask in order to force the
+		 * next force_quiescent_state() invocation to clean up this
+		 * mess in a deadlock-free manner.
+		 */
+		if (rcu_preempt_offline_tasks(rsp, rnp, rdp) && !rnp->qsmask)
+			rnp->qsmask |= mask;
+
 		mask = rnp->grpmask;
 		spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 		rnp = rnp->parent;
 	} while (rnp != NULL);
 	lastcomp = rsp->completed;
 
-	spin_unlock(&rsp->onofflock);  /* irqs remain disabled. */
+	spin_unlock_irqrestore(&rsp->onofflock, flags);
 
-	/*
-	 * Move callbacks from the outgoing CPU to the running CPU.
-	 * Note that the outgoing CPU is now quiscent, so it is now
-	 * (uncharacteristically) safe to access its rcu_data structure.
-	 * Note also that we must carefully retain the order of the
-	 * outgoing CPU's callbacks in order for rcu_barrier() to work
-	 * correctly.  Finally, note that we start all the callbacks
-	 * afresh, even those that have passed through a grace period
-	 * and are therefore ready to invoke.  The theory is that hotplug
-	 * events are rare, and that if they are frequent enough to
-	 * indefinitely delay callbacks, you have far worse things to
-	 * be worrying about.
-	 */
-	rdp_me = rsp->rda[smp_processor_id()];
-	if (rdp->nxtlist != NULL) {
-		*rdp_me->nxttail[RCU_NEXT_TAIL] = rdp->nxtlist;
-		rdp_me->nxttail[RCU_NEXT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
-		rdp->nxtlist = NULL;
-		for (i = 0; i < RCU_NEXT_SIZE; i++)
-			rdp->nxttail[i] = &rdp->nxtlist;
-		rdp_me->qlen += rdp->qlen;
-		rdp->qlen = 0;
-	}
-	local_irq_restore(flags);
+	rcu_adopt_orphan_cbs(rsp);
 }
 
 /*
@@ -918,6 +953,14 @@ static void rcu_offline_cpu(int cpu)
 
 #else /* #ifdef CONFIG_HOTPLUG_CPU */
 
+static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
+{
+}
+
+static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
+{
+}
+
 static void rcu_offline_cpu(int cpu)
 {
 }
@@ -928,7 +971,7 @@ static void rcu_offline_cpu(int cpu)
  * Invoke any RCU callbacks that have made it to the end of their grace
  * period.  Throttle as specified by rdp->blimit.
  */
-static void rcu_do_batch(struct rcu_data *rdp)
+static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 {
 	unsigned long flags;
 	struct rcu_head *next, *list, **tail;
@@ -981,6 +1024,13 @@ static void rcu_do_batch(struct rcu_data *rdp)
 	if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
 		rdp->blimit = blimit;
 
+	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
+	if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
+		rdp->qlen_last_fqs_check = 0;
+		rdp->n_force_qs_snap = rsp->n_force_qs;
+	} else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
+		rdp->qlen_last_fqs_check = rdp->qlen;
+
 	local_irq_restore(flags);
 
 	/* Re-raise the RCU softirq if there are callbacks remaining. */
@@ -1050,33 +1100,32 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
 	int cpu;
 	unsigned long flags;
 	unsigned long mask;
-	struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
-	struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES];
+	struct rcu_node *rnp;
 
-	for (; rnp_cur < rnp_end; rnp_cur++) {
+	rcu_for_each_leaf_node(rsp, rnp) {
 		mask = 0;
-		spin_lock_irqsave(&rnp_cur->lock, flags);
+		spin_lock_irqsave(&rnp->lock, flags);
 		if (rsp->completed != lastcomp) {
-			spin_unlock_irqrestore(&rnp_cur->lock, flags);
+			spin_unlock_irqrestore(&rnp->lock, flags);
 			return 1;
 		}
-		if (rnp_cur->qsmask == 0) {
-			spin_unlock_irqrestore(&rnp_cur->lock, flags);
+		if (rnp->qsmask == 0) {
+			spin_unlock_irqrestore(&rnp->lock, flags);
 			continue;
 		}
-		cpu = rnp_cur->grplo;
+		cpu = rnp->grplo;
 		bit = 1;
-		for (; cpu <= rnp_cur->grphi; cpu++, bit <<= 1) {
-			if ((rnp_cur->qsmask & bit) != 0 && f(rsp->rda[cpu]))
+		for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
+			if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu]))
 				mask |= bit;
 		}
 		if (mask != 0 && rsp->completed == lastcomp) {
 
-			/* cpu_quiet_msk() releases rnp_cur->lock. */
-			cpu_quiet_msk(mask, rsp, rnp_cur, flags);
+			/* cpu_quiet_msk() releases rnp->lock. */
+			cpu_quiet_msk(mask, rsp, rnp, flags);
 			continue;
 		}
-		spin_unlock_irqrestore(&rnp_cur->lock, flags);
+		spin_unlock_irqrestore(&rnp->lock, flags);
 	}
 	return 0;
 }
@@ -1092,7 +1141,7 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 	struct rcu_node *rnp = rcu_get_root(rsp);
 	u8 signaled;
 
-	if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum))
+	if (!rcu_gp_in_progress(rsp))
 		return;  /* No grace period in progress, nothing to force. */
 	if (!spin_trylock_irqsave(&rsp->fqslock, flags)) {
 		rsp->n_force_qs_lh++; /* Inexact, can lose counts.  Tough! */
@@ -1195,7 +1244,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 	}
 
 	/* If there are callbacks ready, invoke them. */
-	rcu_do_batch(rdp);
+	rcu_do_batch(rsp, rdp);
 }
 
 /*
@@ -1251,7 +1300,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
 
 	/* Start a new grace period if one not already started. */
-	if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum)) {
+	if (!rcu_gp_in_progress(rsp)) {
 		unsigned long nestflag;
 		struct rcu_node *rnp_root = rcu_get_root(rsp);
 
@@ -1259,10 +1308,20 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 		rcu_start_gp(rsp, nestflag);  /* releases rnp_root->lock. */
 	}
 
-	/* Force the grace period if too many callbacks or too long waiting. */
-	if (unlikely(++rdp->qlen > qhimark)) {
+	/*
+	 * Force the grace period if too many callbacks or too long waiting.
+	 * Enforce hysteresis, and don't invoke force_quiescent_state()
+	 * if some other CPU has recently done so.  Also, don't bother
+	 * invoking force_quiescent_state() if the newly enqueued callback
+	 * is the only one waiting for a grace period to complete.
+	 */
+	if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
 		rdp->blimit = LONG_MAX;
-		force_quiescent_state(rsp, 0);
+		if (rsp->n_force_qs == rdp->n_force_qs_snap &&
+		    *rdp->nxttail[RCU_DONE_TAIL] != head)
+			force_quiescent_state(rsp, 0);
+		rdp->n_force_qs_snap = rsp->n_force_qs;
+		rdp->qlen_last_fqs_check = rdp->qlen;
 	} else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
 		force_quiescent_state(rsp, 1);
 	local_irq_restore(flags);
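The hysteresis can be read as: only consider forcing once the queue has grown by qhimark since the last checkpoint, and even then skip the call if another CPU has forced a quiescent state since this CPU's snapshot. A rough user-space model of just that decision (the *rdp->nxttail[RCU_DONE_TAIL] != head test is left out, and the names below are stand-ins rather than the kernel's):

#include <stdio.h>

#define QHIMARK 10000	/* mirrors the qhimark default above */

struct global_state { long n_force_qs; };	/* forcings that have run system-wide */

struct cpu_state {
	long qlen;			/* callbacks queued on this CPU */
	long qlen_last_fqs_check;	/* queue length at the last checkpoint */
	long n_force_qs_snap;		/* global count seen at that checkpoint */
};

/* Model of the decision made for each newly enqueued callback. */
static void note_enqueue(struct global_state *g, struct cpu_state *c)
{
	if (++c->qlen > c->qlen_last_fqs_check + QHIMARK) {
		if (g->n_force_qs == c->n_force_qs_snap) {
			printf("forcing at qlen %ld\n", c->qlen);
			g->n_force_qs++;	/* pretend the forcing ran */
		}
		/* Move the checkpoint forward either way. */
		c->n_force_qs_snap = g->n_force_qs;
		c->qlen_last_fqs_check = c->qlen;
	}
}

int main(void)
{
	struct global_state g = { 0 };
	struct cpu_state c = { 0, 0, 0 };
	long i;

	for (i = 0; i < 25000; i++)
		note_enqueue(&g, &c);	/* prints only at qlen 10001 and 20002 */
	return 0;
}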
@@ -1331,7 +1390,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 	}
 
 	/* Has an RCU GP gone long enough to send resched IPIs &c? */
-	if (ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum) &&
+	if (rcu_gp_in_progress(rsp) &&
 	    ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)) {
 		rdp->n_rp_need_fqs++;
 		return 1;
@@ -1368,6 +1427,82 @@ int rcu_needs_cpu(int cpu)
 	       rcu_preempt_needs_cpu(cpu);
 }
 
+static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
+static atomic_t rcu_barrier_cpu_count;
+static DEFINE_MUTEX(rcu_barrier_mutex);
+static struct completion rcu_barrier_completion;
+
+static void rcu_barrier_callback(struct rcu_head *notused)
+{
+	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+		complete(&rcu_barrier_completion);
+}
+
+/*
+ * Called with preemption disabled, and from cross-cpu IRQ context.
+ */
+static void rcu_barrier_func(void *type)
+{
+	int cpu = smp_processor_id();
+	struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
+	void (*call_rcu_func)(struct rcu_head *head,
+			      void (*func)(struct rcu_head *head));
+
+	atomic_inc(&rcu_barrier_cpu_count);
+	call_rcu_func = type;
+	call_rcu_func(head, rcu_barrier_callback);
+}
+
+/*
+ * Orchestrate the specified type of RCU barrier, waiting for all
+ * RCU callbacks of the specified type to complete.
+ */
+static void _rcu_barrier(struct rcu_state *rsp,
+			 void (*call_rcu_func)(struct rcu_head *head,
+					       void (*func)(struct rcu_head *head)))
+{
+	BUG_ON(in_interrupt());
+	/* Take mutex to serialize concurrent rcu_barrier() requests. */
+	mutex_lock(&rcu_barrier_mutex);
+	init_completion(&rcu_barrier_completion);
+	/*
+	 * Initialize rcu_barrier_cpu_count to 1, then invoke
+	 * rcu_barrier_func() on each CPU, so that each CPU also has
+	 * incremented rcu_barrier_cpu_count.  Only then is it safe to
+	 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
+	 * might complete its grace period before all of the other CPUs
+	 * did their increment, causing this function to return too
+	 * early.
+	 */
+	atomic_set(&rcu_barrier_cpu_count, 1);
+	preempt_disable(); /* stop CPU_DYING from filling orphan_cbs_list */
+	rcu_adopt_orphan_cbs(rsp);
+	on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1);
+	preempt_enable(); /* CPU_DYING can again fill orphan_cbs_list */
+	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+		complete(&rcu_barrier_completion);
+	wait_for_completion(&rcu_barrier_completion);
+	mutex_unlock(&rcu_barrier_mutex);
+}
+
+/**
+ * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
+ */
+void rcu_barrier_bh(void)
+{
+	_rcu_barrier(&rcu_bh_state, call_rcu_bh);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier_bh);
+
+/**
+ * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
+ */
+void rcu_barrier_sched(void)
+{
+	_rcu_barrier(&rcu_sched_state, call_rcu_sched);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier_sched);
+
 /*
  * Do boot-time initialization of a CPU's per-CPU RCU data.
  */
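The comment in _rcu_barrier() describes a common completion-counting pattern: seed the counter with one reference held by the waiter, add one reference per posted callback, and let the waiter drop its own reference last so the counter cannot hit zero early. A user-space sketch of the same pattern using POSIX threads (illustrative only; each thread here stands in for one per-CPU callback posted via call_rcu()):

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

#define NCALLBACKS 4

static int barrier_count;	/* plays the role of rcu_barrier_cpu_count */
static pthread_mutex_t count_lock = PTHREAD_MUTEX_INITIALIZER;
static sem_t barrier_done;	/* plays the role of rcu_barrier_completion */

static void barrier_callback(void)
{
	int last;

	pthread_mutex_lock(&count_lock);
	last = (--barrier_count == 0);
	pthread_mutex_unlock(&count_lock);
	if (last)
		sem_post(&barrier_done);
}

static void *callback_thread(void *unused)
{
	(void)unused;
	barrier_callback();
	return NULL;
}

int main(void)
{
	pthread_t tid[NCALLBACKS];
	int i;

	sem_init(&barrier_done, 0, 0);
	barrier_count = 1;			/* the waiter's own reference */
	for (i = 0; i < NCALLBACKS; i++) {
		pthread_mutex_lock(&count_lock);
		barrier_count++;		/* one reference per posted callback */
		pthread_mutex_unlock(&count_lock);
		pthread_create(&tid[i], NULL, callback_thread, NULL);
	}
	barrier_callback();			/* drop the waiter's reference last */
	sem_wait(&barrier_done);		/* all callbacks have completed */
	for (i = 0; i < NCALLBACKS; i++)
		pthread_join(&tid[i], NULL);
	printf("all %d callbacks completed\n", NCALLBACKS);
	return 0;
}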
@@ -1418,6 +1553,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
 	rdp->beenonline = 1;	 /* We have now been online. */
 	rdp->preemptable = preemptable;
 	rdp->passed_quiesc_completed = lastcomp - 1;
+	rdp->qlen_last_fqs_check = 0;
+	rdp->n_force_qs_snap = rsp->n_force_qs;
 	rdp->blimit = blimit;
 	spin_unlock(&rnp->lock);		/* irqs remain disabled. */
 
@@ -1464,6 +1601,22 @@ int __cpuinit rcu_cpu_notify(struct notifier_block *self,
 	case CPU_UP_PREPARE_FROZEN:
 		rcu_online_cpu(cpu);
 		break;
+	case CPU_DYING:
+	case CPU_DYING_FROZEN:
+		/*
+		 * preempt_disable() in _rcu_barrier() prevents stop_machine(),
+		 * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
+		 * returns, all online cpus have queued rcu_barrier_func().
+		 * The dying CPU clears its cpu_online_mask bit and
+		 * moves all of its RCU callbacks to ->orphan_cbs_list
+		 * in the context of stop_machine(), so subsequent calls
+		 * to _rcu_barrier() will adopt these callbacks and only
+		 * then queue rcu_barrier_func() on all remaining CPUs.
+		 */
+		rcu_send_cbs_to_orphanage(&rcu_bh_state);
+		rcu_send_cbs_to_orphanage(&rcu_sched_state);
+		rcu_preempt_send_cbs_to_orphanage();
+		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 	case CPU_UP_CANCELED:
@@ -1526,7 +1679,8 @@ static void __init rcu_init_one(struct rcu_state *rsp)
 		cpustride *= rsp->levelspread[i];
 		rnp = rsp->level[i];
 		for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
-			spin_lock_init(&rnp->lock);
+			if (rnp != rcu_get_root(rsp))
+				spin_lock_init(&rnp->lock);
 			rnp->gpnum = 0;
 			rnp->qsmask = 0;
 			rnp->qsmaskinit = 0;
@@ -1549,6 +1703,7 @@ static void __init rcu_init_one(struct rcu_state *rsp)
 			INIT_LIST_HEAD(&rnp->blocked_tasks[1]);
 		}
 	}
+	spin_lock_init(&rcu_get_root(rsp)->lock);
 }
 
 /*
@@ -1558,6 +1713,10 @@ static void __init rcu_init_one(struct rcu_state *rsp)
  */
 #define RCU_INIT_FLAVOR(rsp, rcu_data) \
 do { \
+	int i; \
+	int j; \
+	struct rcu_node *rnp; \
+	\
 	rcu_init_one(rsp); \
 	rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \
 	j = 0; \
@@ -1570,31 +1729,8 @@ do { \
 	} \
 } while (0)
 
-#ifdef CONFIG_TREE_PREEMPT_RCU
-
-void __init __rcu_init_preempt(void)
-{
-	int i;			/* All used by RCU_INIT_FLAVOR(). */
-	int j;
-	struct rcu_node *rnp;
-
-	RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data);
-}
-
-#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-
-void __init __rcu_init_preempt(void)
-{
-}
-
-#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
-
 void __init __rcu_init(void)
 {
-	int i;			/* All used by RCU_INIT_FLAVOR(). */
-	int j;
-	struct rcu_node *rnp;
-
 	rcu_bootup_announce();
 #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
 	printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
@@ -1605,6 +1741,4 @@ void __init __rcu_init(void)
 	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 }
 
-module_param(blimit, int, 0);
-module_param(qhimark, int, 0);
-module_param(qlowmark, int, 0);
+#include "rcutree_plugin.h"