Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--	kernel/rcutree.c	526
1 file changed, 479 insertions(+), 47 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index dd4aea806f8e..e486f7c3ffb8 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -47,6 +47,8 @@
 #include <linux/mutex.h>
 #include <linux/time.h>
 #include <linux/kernel_stat.h>
+#include <linux/wait.h>
+#include <linux/kthread.h>
 
 #include "rcutree.h"
 
@@ -79,10 +81,41 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
 struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
+static struct rcu_state *rcu_state;
+
 int rcu_scheduler_active __read_mostly;
 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 
 /*
+ * Control variables for per-CPU and per-rcu_node kthreads. These
+ * handle all flavors of RCU.
+ */
+static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
+DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
+DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
+DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
+static DEFINE_PER_CPU(wait_queue_head_t, rcu_cpu_wq);
+DEFINE_PER_CPU(char, rcu_cpu_has_work);
+static char rcu_kthreads_spawnable;
+
+static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
+static void invoke_rcu_cpu_kthread(void);
+
+#define RCU_KTHREAD_PRIO 1	/* RT priority for per-CPU kthreads. */
+
+/*
+ * Track the rcutorture test sequence number and the update version
+ * number within a given test. The rcutorture_testseq is incremented
+ * on every rcutorture module load and unload, so has an odd value
+ * when a test is running. The rcutorture_vernum is set to zero
+ * when rcutorture starts and is incremented on each rcutorture update.
+ * These variables enable correlating rcutorture output with the
+ * RCU tracing information.
+ */
+unsigned long rcutorture_testseq;
+unsigned long rcutorture_vernum;
+
+/*
  * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
  * permit this function to be invoked without holding the root rcu_node
  * structure's ->lock, but of course results can be subject to change.
@@ -124,6 +157,7 @@ void rcu_note_context_switch(int cpu)
 	rcu_sched_qs(cpu);
 	rcu_preempt_note_context_switch(cpu);
 }
+EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 
 #ifdef CONFIG_NO_HZ
 DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
@@ -140,10 +174,8 @@ module_param(blimit, int, 0);
 module_param(qhimark, int, 0);
 module_param(qlowmark, int, 0);
 
-#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-int rcu_cpu_stall_suppress __read_mostly = RCU_CPU_STALL_SUPPRESS_INIT;
+int rcu_cpu_stall_suppress __read_mostly;
 module_param(rcu_cpu_stall_suppress, int, 0644);
-#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
 
 static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
 static int rcu_pending(int cpu);
@@ -176,6 +208,31 @@ void rcu_bh_force_quiescent_state(void)
 EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
 
 /*
+ * Record the number of times rcutorture tests have been initiated and
+ * terminated. This information allows the debugfs tracing stats to be
+ * correlated to the rcutorture messages, even when the rcutorture module
+ * is being repeatedly loaded and unloaded. In other words, we cannot
+ * store this state in rcutorture itself.
+ */
+void rcutorture_record_test_transition(void)
+{
+	rcutorture_testseq++;
+	rcutorture_vernum = 0;
+}
+EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);
+
+/*
+ * Record the number of writer passes through the current rcutorture test.
+ * This is also used to correlate debugfs tracing stats with the rcutorture
+ * messages.
+ */
+void rcutorture_record_progress(unsigned long vernum)
+{
+	rcutorture_vernum++;
+}
+EXPORT_SYMBOL_GPL(rcutorture_record_progress);
+
+/*
  * Force a quiescent state for RCU-sched.
  */
 void rcu_sched_force_quiescent_state(void)
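
The two counters added above exist so debugfs trace output can be lined up with rcutorture's own messages; an odd rcutorture_testseq means a torture run is in flight. A stand-alone sketch of that bookkeeping, using illustrative names rather than the kernel symbols:

	#include <stdio.h>

	/* Illustrative stand-ins for the counters above, not the kernel symbols. */
	static unsigned long testseq;	/* bumped at every torture start and stop */
	static unsigned long vernum;	/* bumped at every writer pass */

	static void record_test_transition(void) { testseq++; vernum = 0; }
	static void record_progress(void)        { vernum++; }

	int main(void)
	{
		record_test_transition();	/* start: testseq goes odd */
		record_progress();
		record_progress();
		printf("running=%lu testseq=%lu vernum=%lu\n", testseq & 1, testseq, vernum);
		record_test_transition();	/* stop: testseq goes even */
		printf("running=%lu testseq=%lu vernum=%lu\n", testseq & 1, testseq, vernum);
		return 0;
	}
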
@@ -234,8 +291,8 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp)
 		return 1;
 	}
 
-	/* If preemptable RCU, no point in sending reschedule IPI. */
-	if (rdp->preemptable)
+	/* If preemptible RCU, no point in sending reschedule IPI. */
+	if (rdp->preemptible)
 		return 0;
 
 	/* The CPU is online, so send it a reschedule IPI. */
@@ -450,8 +507,6 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 
 #endif /* #else #ifdef CONFIG_NO_HZ */
 
-#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-
 int rcu_cpu_stall_suppress __read_mostly;
 
 static void record_gp_stall_check_time(struct rcu_state *rsp)
@@ -537,21 +592,24 @@ static void print_cpu_stall(struct rcu_state *rsp)
 
 static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-	long delta;
+	unsigned long j;
+	unsigned long js;
 	struct rcu_node *rnp;
 
 	if (rcu_cpu_stall_suppress)
 		return;
-	delta = jiffies - ACCESS_ONCE(rsp->jiffies_stall);
+	j = ACCESS_ONCE(jiffies);
+	js = ACCESS_ONCE(rsp->jiffies_stall);
 	rnp = rdp->mynode;
-	if ((ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && delta >= 0) {
+	if ((ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && ULONG_CMP_GE(j, js)) {
 
 		/* We haven't checked in, so go dump stack. */
 		print_cpu_stall(rsp);
 
-	} else if (rcu_gp_in_progress(rsp) && delta >= RCU_STALL_RAT_DELAY) {
+	} else if (rcu_gp_in_progress(rsp) &&
+		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
 
-		/* They had two time units to dump stack, so complain. */
+		/* They had a few time units to dump stack, so complain. */
 		print_other_cpu_stall(rsp);
 	}
 }
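
The rewrite above switches the stall check to ULONG_CMP_GE() so it stays well defined across jiffies wraparound. A small user-space sketch, with ULONG_CMP_GE() redefined locally (assumed to match the kernel macro's modular-arithmetic form):

	#include <stdio.h>
	#include <limits.h>

	/* Local copy, assumed to match the kernel's ULONG_CMP_GE(): "a is at or after b", modulo wrap. */
	#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))

	int main(void)
	{
		unsigned long js = ULONG_MAX - 5;	/* stall deadline recorded just before the counter wraps */
		unsigned long j  = 10;			/* current time, after the wrap */

		printf("plain j >= js       : %d\n", j >= js ? 1 : 0);			/* 0: fooled by the wrap */
		printf("ULONG_CMP_GE(j, js) : %d\n", ULONG_CMP_GE(j, js) ? 1 : 0);	/* 1: wrap-safe */
		return 0;
	}
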
@@ -587,26 +645,6 @@ static void __init check_cpu_stall_init(void)
 	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
 }
 
-#else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-
-static void record_gp_stall_check_time(struct rcu_state *rsp)
-{
-}
-
-static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
-{
-}
-
-void rcu_cpu_stall_reset(void)
-{
-}
-
-static void __init check_cpu_stall_init(void)
-{
-}
-
-#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-
 /*
  * Update CPU-local rcu_data state to record the newly noticed grace period.
  * This is used both when we started the grace period and when we notice
@@ -809,6 +847,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 		rnp->completed = rsp->completed;
 		rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
 		rcu_start_gp_per_cpu(rsp, rnp, rdp);
+		rcu_preempt_boost_start_gp(rnp);
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		return;
 	}
@@ -844,6 +883,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 		rnp->completed = rsp->completed;
 		if (rnp == rdp->mynode)
 			rcu_start_gp_per_cpu(rsp, rnp, rdp);
+		rcu_preempt_boost_start_gp(rnp);
 		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
 	}
 
@@ -864,7 +904,12 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
 	__releases(rcu_get_root(rsp)->lock)
 {
+	unsigned long gp_duration;
+
 	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
+	gp_duration = jiffies - rsp->gp_start;
+	if (gp_duration > rsp->gp_max)
+		rsp->gp_max = gp_duration;
 	rsp->completed = rsp->gpnum;
 	rsp->signaled = RCU_GP_IDLE;
 	rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
@@ -894,7 +939,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 			return;
 		}
 		rnp->qsmask &= ~mask;
-		if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {
+		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
 
 			/* Other bits still set at this level, so done. */
 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
@@ -1037,6 +1082,8 @@ static void rcu_send_cbs_to_online(struct rcu_state *rsp)
 /*
  * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy
  * and move all callbacks from the outgoing CPU to the current one.
+ * There can only be one CPU hotplug operation at a time, so no other
+ * CPU can be attempting to update rcu_cpu_kthread_task.
  */
 static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 {
@@ -1045,6 +1092,14 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 	int need_report = 0;
 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
 	struct rcu_node *rnp;
+	struct task_struct *t;
+
+	/* Stop the CPU's kthread. */
+	t = per_cpu(rcu_cpu_kthread_task, cpu);
+	if (t != NULL) {
+		per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
+		kthread_stop(t);
+	}
 
 	/* Exclude any attempts to start a new grace period. */
 	raw_spin_lock_irqsave(&rsp->onofflock, flags);
@@ -1082,6 +1137,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	if (need_report & RCU_OFL_TASKS_EXP_GP)
 		rcu_report_exp_rnp(rsp, rnp);
+	rcu_node_kthread_setaffinity(rnp, -1);
 }
 
 /*
@@ -1143,7 +1199,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 		next = list->next;
 		prefetch(next);
 		debug_rcu_head_unqueue(list);
-		list->func(list);
+		__rcu_reclaim(list);
 		list = next;
 		if (++count >= rdp->blimit)
 			break;
@@ -1179,7 +1235,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 
 	/* Re-raise the RCU softirq if there are callbacks remaining. */
 	if (cpu_has_callbacks_ready_to_invoke(rdp))
-		raise_softirq(RCU_SOFTIRQ);
+		invoke_rcu_cpu_kthread();
 }
 
 /*
@@ -1225,7 +1281,7 @@ void rcu_check_callbacks(int cpu, int user)
 	}
 	rcu_preempt_check_callbacks(cpu);
 	if (rcu_pending(cpu))
-		raise_softirq(RCU_SOFTIRQ);
+		invoke_rcu_cpu_kthread();
 }
 
 #ifdef CONFIG_SMP
@@ -1233,6 +1289,8 @@ void rcu_check_callbacks(int cpu, int user)
 /*
  * Scan the leaf rcu_node structures, processing dyntick state for any that
  * have not yet encountered a quiescent state, using the function specified.
+ * Also initiate boosting for any threads blocked on the root rcu_node.
+ *
  * The caller must have suppressed start of new grace periods.
  */
 static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
@@ -1251,7 +1309,7 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
 			return;
 		}
 		if (rnp->qsmask == 0) {
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
 			continue;
 		}
 		cpu = rnp->grplo;
@@ -1269,6 +1327,11 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
 		}
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	}
+	rnp = rcu_get_root(rsp);
+	if (rnp->qsmask == 0) {
+		raw_spin_lock_irqsave(&rnp->lock, flags);
+		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
+	}
 }
 
 /*
@@ -1389,7 +1452,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 /*
  * Do softirq processing for the current CPU.
  */
-static void rcu_process_callbacks(struct softirq_action *unused)
+static void rcu_process_callbacks(void)
 {
 	/*
 	 * Memory references from any prior RCU read-side critical sections
@@ -1414,6 +1477,347 @@ static void rcu_process_callbacks(struct softirq_action *unused)
 	rcu_needs_cpu_flush();
 }
 
+/*
+ * Wake up the current CPU's kthread. This replaces raise_softirq()
+ * in earlier versions of RCU. Note that because we are running on
+ * the current CPU with interrupts disabled, the rcu_cpu_kthread_task
+ * cannot disappear out from under us.
+ */
+static void invoke_rcu_cpu_kthread(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__this_cpu_write(rcu_cpu_has_work, 1);
+	if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
+		local_irq_restore(flags);
+		return;
+	}
+	wake_up(&__get_cpu_var(rcu_cpu_wq));
+	local_irq_restore(flags);
+}
+
+/*
+ * Wake up the specified per-rcu_node-structure kthread.
+ * Because the per-rcu_node kthreads are immortal, we don't need
+ * to do anything to keep them alive.
+ */
+static void invoke_rcu_node_kthread(struct rcu_node *rnp)
+{
+	struct task_struct *t;
+
+	t = rnp->node_kthread_task;
+	if (t != NULL)
+		wake_up_process(t);
+}
+
+/*
+ * Set the specified CPU's kthread to run RT or not, as specified by
+ * the to_rt argument. The CPU-hotplug locks are held, so the task
+ * is not going away.
+ */
+static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
+{
+	int policy;
+	struct sched_param sp;
+	struct task_struct *t;
+
+	t = per_cpu(rcu_cpu_kthread_task, cpu);
+	if (t == NULL)
+		return;
+	if (to_rt) {
+		policy = SCHED_FIFO;
+		sp.sched_priority = RCU_KTHREAD_PRIO;
+	} else {
+		policy = SCHED_NORMAL;
+		sp.sched_priority = 0;
+	}
+	sched_setscheduler_nocheck(t, policy, &sp);
+}
+
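
rcu_cpu_kthread_setrt() above flips a kthread between SCHED_FIFO at RCU_KTHREAD_PRIO and normal scheduling. The user-space counterpart of that flip, acting on the calling process through sched_setscheduler() (illustrative only; granting the RT slot normally requires CAP_SYS_NICE):

	#include <sched.h>
	#include <stdio.h>

	#define KTHREAD_PRIO 1	/* mirrors RCU_KTHREAD_PRIO above */

	/* Toggle the calling process between real-time FIFO and normal scheduling. */
	static int set_rt(int to_rt)
	{
		struct sched_param sp;
		int policy;

		if (to_rt) {
			policy = SCHED_FIFO;
			sp.sched_priority = KTHREAD_PRIO;
		} else {
			policy = SCHED_OTHER;	/* user-space name for the normal policy */
			sp.sched_priority = 0;
		}
		return sched_setscheduler(0, policy, &sp);	/* 0 means "this process" */
	}

	int main(void)
	{
		if (set_rt(1) != 0)
			perror("sched_setscheduler");	/* usually fails without CAP_SYS_NICE */
		set_rt(0);
		return 0;
	}
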
+/*
+ * Timer handler to initiate the waking up of per-CPU kthreads that
+ * have yielded the CPU due to excess numbers of RCU callbacks.
+ * We wake up the per-rcu_node kthread, which in turn will wake up
+ * the booster kthread.
+ */
+static void rcu_cpu_kthread_timer(unsigned long arg)
+{
+	unsigned long flags;
+	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
+	struct rcu_node *rnp = rdp->mynode;
+
+	raw_spin_lock_irqsave(&rnp->lock, flags);
+	rnp->wakemask |= rdp->grpmask;
+	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	invoke_rcu_node_kthread(rnp);
+}
+
+/*
+ * Drop to non-real-time priority and yield, but only after posting a
+ * timer that will cause us to regain our real-time priority if we
+ * remain preempted. Either way, we restore our real-time priority
+ * before returning.
+ */
+static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
+{
+	struct sched_param sp;
+	struct timer_list yield_timer;
+
+	setup_timer_on_stack(&yield_timer, f, arg);
+	mod_timer(&yield_timer, jiffies + 2);
+	sp.sched_priority = 0;
+	sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
+	set_user_nice(current, 19);
+	schedule();
+	sp.sched_priority = RCU_KTHREAD_PRIO;
+	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
+	del_timer(&yield_timer);
+}
+
+/*
+ * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
+ * This can happen while the corresponding CPU is either coming online
+ * or going offline. We cannot wait until the CPU is fully online
+ * before starting the kthread, because the various notifier functions
+ * can wait for RCU grace periods. So we park rcu_cpu_kthread() until
+ * the corresponding CPU is online.
+ *
+ * Return 1 if the kthread needs to stop, 0 otherwise.
+ *
+ * Caller must disable bh. This function can momentarily enable it.
+ */
+static int rcu_cpu_kthread_should_stop(int cpu)
+{
+	while (cpu_is_offline(cpu) ||
+	       !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
+	       smp_processor_id() != cpu) {
+		if (kthread_should_stop())
+			return 1;
+		per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
+		per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
+		local_bh_enable();
+		schedule_timeout_uninterruptible(1);
+		if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
+			set_cpus_allowed_ptr(current, cpumask_of(cpu));
+		local_bh_disable();
+	}
+	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
+	return 0;
+}
+
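
rcu_cpu_kthread_should_stop() keeps re-pinning the kthread until it really is running on its designated CPU. The same pin-and-verify step in user space, using sched_setaffinity() and sched_getcpu() (a sketch, not the kernel path):

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>

	/* Pin the calling thread to one CPU, then report where it actually landed. */
	static int pin_to_cpu(int cpu)
	{
		cpu_set_t set;

		CPU_ZERO(&set);
		CPU_SET(cpu, &set);
		if (sched_setaffinity(0, sizeof(set), &set) != 0) {
			perror("sched_setaffinity");
			return -1;
		}
		sched_yield();	/* give the scheduler a chance to migrate us */
		return sched_getcpu();
	}

	int main(void)
	{
		printf("requested CPU 0, now running on CPU %d\n", pin_to_cpu(0));
		return 0;
	}
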
+/*
+ * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
+ * earlier RCU softirq.
+ */
+static int rcu_cpu_kthread(void *arg)
+{
+	int cpu = (int)(long)arg;
+	unsigned long flags;
+	int spincnt = 0;
+	unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
+	wait_queue_head_t *wqp = &per_cpu(rcu_cpu_wq, cpu);
+	char work;
+	char *workp = &per_cpu(rcu_cpu_has_work, cpu);
+
+	for (;;) {
+		*statusp = RCU_KTHREAD_WAITING;
+		wait_event_interruptible(*wqp,
+					 *workp != 0 || kthread_should_stop());
+		local_bh_disable();
+		if (rcu_cpu_kthread_should_stop(cpu)) {
+			local_bh_enable();
+			break;
+		}
+		*statusp = RCU_KTHREAD_RUNNING;
+		per_cpu(rcu_cpu_kthread_loops, cpu)++;
+		local_irq_save(flags);
+		work = *workp;
+		*workp = 0;
+		local_irq_restore(flags);
+		if (work)
+			rcu_process_callbacks();
+		local_bh_enable();
+		if (*workp != 0)
+			spincnt++;
+		else
+			spincnt = 0;
+		if (spincnt > 10) {
+			*statusp = RCU_KTHREAD_YIELDING;
+			rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
+			spincnt = 0;
+		}
+	}
+	*statusp = RCU_KTHREAD_STOPPED;
+	return 0;
+}
+
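
For readers more used to user-space threading, the wait/work/spin-count shape of rcu_cpu_kthread() above maps roughly onto a condition-variable worker. The sketch below is only an analogue with invented names; it has no equivalent of the per-CPU state, local_bh_disable(), or the timer-backed rcu_yield() escape hatch:

	#include <pthread.h>
	#include <sched.h>
	#include <stdio.h>
	#include <unistd.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER;
	static int has_work;	/* analogue of rcu_cpu_has_work */
	static int stop;	/* analogue of kthread_should_stop() */

	static void *worker(void *arg)
	{
		int spincnt = 0;

		(void)arg;
		for (;;) {
			pthread_mutex_lock(&lock);
			while (!has_work && !stop)	/* analogue of wait_event_interruptible() */
				pthread_cond_wait(&wq, &lock);
			if (stop) {
				pthread_mutex_unlock(&lock);
				break;
			}
			has_work = 0;
			pthread_mutex_unlock(&lock);

			puts("processing callbacks");	/* stands in for rcu_process_callbacks() */

			pthread_mutex_lock(&lock);
			spincnt = has_work ? spincnt + 1 : 0;	/* did more work arrive while we ran? */
			pthread_mutex_unlock(&lock);
			if (spincnt > 10) {		/* crude stand-in for the yield-after-spinning logic */
				sched_yield();
				spincnt = 0;
			}
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, worker, NULL);
		pthread_mutex_lock(&lock);
		has_work = 1;			/* analogue of invoke_rcu_cpu_kthread() */
		pthread_cond_signal(&wq);
		pthread_mutex_unlock(&lock);
		usleep(10000);
		pthread_mutex_lock(&lock);
		stop = 1;
		pthread_cond_signal(&wq);
		pthread_mutex_unlock(&lock);
		pthread_join(t, NULL);
		return 0;
	}
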
+/*
+ * Spawn a per-CPU kthread, setting up affinity and priority.
+ * Because the CPU hotplug lock is held, no other CPU will be attempting
+ * to manipulate rcu_cpu_kthread_task. There might be another CPU
+ * attempting to access it during boot, but the locking in kthread_bind()
+ * will enforce sufficient ordering.
+ */
+static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
+{
+	struct sched_param sp;
+	struct task_struct *t;
+
+	if (!rcu_kthreads_spawnable ||
+	    per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
+		return 0;
+	t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
+	if (IS_ERR(t))
+		return PTR_ERR(t);
+	kthread_bind(t, cpu);
+	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
+	WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
+	per_cpu(rcu_cpu_kthread_task, cpu) = t;
+	wake_up_process(t);
+	sp.sched_priority = RCU_KTHREAD_PRIO;
+	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+	return 0;
+}
+
+/*
+ * Per-rcu_node kthread, which is in charge of waking up the per-CPU
+ * kthreads when needed. We ignore requests to wake up kthreads
+ * for offline CPUs, which is OK because force_quiescent_state()
+ * takes care of this case.
+ */
+static int rcu_node_kthread(void *arg)
+{
+	int cpu;
+	unsigned long flags;
+	unsigned long mask;
+	struct rcu_node *rnp = (struct rcu_node *)arg;
+	struct sched_param sp;
+	struct task_struct *t;
+
+	for (;;) {
+		rnp->node_kthread_status = RCU_KTHREAD_WAITING;
+		wait_event_interruptible(rnp->node_wq, rnp->wakemask != 0);
+		rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
+		raw_spin_lock_irqsave(&rnp->lock, flags);
+		mask = rnp->wakemask;
+		rnp->wakemask = 0;
+		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
+		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
+			if ((mask & 0x1) == 0)
+				continue;
+			preempt_disable();
+			t = per_cpu(rcu_cpu_kthread_task, cpu);
+			if (!cpu_online(cpu) || t == NULL) {
+				preempt_enable();
+				continue;
+			}
+			per_cpu(rcu_cpu_has_work, cpu) = 1;
+			sp.sched_priority = RCU_KTHREAD_PRIO;
+			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+			preempt_enable();
+		}
+	}
+	/* NOTREACHED */
+	rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
+	return 0;
+}
+
+/*
+ * Set the per-rcu_node kthread's affinity to cover all CPUs that are
+ * served by the rcu_node in question. The CPU hotplug lock is still
+ * held, so the value of rnp->qsmaskinit will be stable.
+ *
+ * We don't include outgoingcpu in the affinity set, use -1 if there is
+ * no outgoing CPU. If there are no CPUs left in the affinity set,
+ * this function allows the kthread to execute on any CPU.
+ */
+static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+{
+	cpumask_var_t cm;
+	int cpu;
+	unsigned long mask = rnp->qsmaskinit;
+
+	if (rnp->node_kthread_task == NULL)
+		return;
+	if (!alloc_cpumask_var(&cm, GFP_KERNEL))
+		return;
+	cpumask_clear(cm);
+	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
+		if ((mask & 0x1) && cpu != outgoingcpu)
+			cpumask_set_cpu(cpu, cm);
+	if (cpumask_weight(cm) == 0) {
+		cpumask_setall(cm);
+		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
+			cpumask_clear_cpu(cpu, cm);
+		WARN_ON_ONCE(cpumask_weight(cm) == 0);
+	}
+	set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
+	rcu_boost_kthread_setaffinity(rnp, cm);
+	free_cpumask_var(cm);
+}
+
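
The mask built by rcu_node_kthread_setaffinity() above is simply every CPU whose bit is set in rnp->qsmaskinit, minus the outgoing CPU. The bit walk in isolation (plain C, illustrative values):

	#include <stdio.h>

	/* Print the CPUs that would end up in the affinity mask. */
	static void build_affinity(unsigned long qsmaskinit, int grplo, int grphi, int outgoingcpu)
	{
		unsigned long mask = qsmaskinit;
		int cpu;

		for (cpu = grplo; cpu <= grphi; cpu++, mask >>= 1)
			if ((mask & 0x1) && cpu != outgoingcpu)
				printf("CPU %d\n", cpu);
	}

	int main(void)
	{
		/* CPUs 0-3 in this node, bits 0, 1 and 3 set, CPU 1 going offline. */
		build_affinity(0xb, 0, 3, 1);	/* prints CPU 0 and CPU 3 */
		return 0;
	}
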
+/*
+ * Spawn a per-rcu_node kthread, setting priority and affinity.
+ * Called during boot before online/offline can happen, or, if
+ * during runtime, with the main CPU-hotplug locks held. So only
+ * one of these can be executing at a time.
+ */
+static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
+						struct rcu_node *rnp)
+{
+	unsigned long flags;
+	int rnp_index = rnp - &rsp->node[0];
+	struct sched_param sp;
+	struct task_struct *t;
+
+	if (!rcu_kthreads_spawnable ||
+	    rnp->qsmaskinit == 0)
+		return 0;
+	if (rnp->node_kthread_task == NULL) {
+		t = kthread_create(rcu_node_kthread, (void *)rnp,
+				   "rcun%d", rnp_index);
+		if (IS_ERR(t))
+			return PTR_ERR(t);
+		raw_spin_lock_irqsave(&rnp->lock, flags);
+		rnp->node_kthread_task = t;
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		wake_up_process(t);
+		sp.sched_priority = 99;
+		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+	}
+	return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
+}
+
+/*
+ * Spawn all kthreads -- called as soon as the scheduler is running.
+ */
+static int __init rcu_spawn_kthreads(void)
+{
+	int cpu;
+	struct rcu_node *rnp;
+
+	rcu_kthreads_spawnable = 1;
+	for_each_possible_cpu(cpu) {
+		init_waitqueue_head(&per_cpu(rcu_cpu_wq, cpu));
+		per_cpu(rcu_cpu_has_work, cpu) = 0;
+		if (cpu_online(cpu))
+			(void)rcu_spawn_one_cpu_kthread(cpu);
+	}
+	rnp = rcu_get_root(rcu_state);
+	init_waitqueue_head(&rnp->node_wq);
+	rcu_init_boost_waitqueue(rnp);
+	(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+	if (NUM_RCU_NODES > 1)
+		rcu_for_each_leaf_node(rcu_state, rnp) {
+			init_waitqueue_head(&rnp->node_wq);
+			rcu_init_boost_waitqueue(rnp);
+			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+		}
+	return 0;
+}
+early_initcall(rcu_spawn_kthreads);
+
 static void
 __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	   struct rcu_state *rsp)
@@ -1439,6 +1843,13 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	/* Add the callback to our list. */
 	*rdp->nxttail[RCU_NEXT_TAIL] = head;
 	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
+	rdp->qlen++;
+
+	/* If interrupts were disabled, don't dive into RCU core. */
+	if (irqs_disabled_flags(flags)) {
+		local_irq_restore(flags);
+		return;
+	}
 
 	/*
 	 * Force the grace period if too many callbacks or too long waiting.
@@ -1447,7 +1858,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	 * invoking force_quiescent_state() if the newly enqueued callback
 	 * is the only one waiting for a grace period to complete.
 	 */
-	if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
+	if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
 
 		/* Are we ignoring a completed grace period? */
 		rcu_process_gp_end(rsp, rdp);
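
With the increment moved up next to the enqueue (previous hunk), the test above still forces quiescent states only when the callback backlog has grown by more than qhimark since the last check. A toy version of that batching threshold (the 10000 here merely stands in for the qhimark module parameter):

	#include <stdio.h>

	#define QHIMARK 10000	/* illustrative threshold standing in for qhimark */

	static long qlen, qlen_last_fqs_check;

	/* Enqueue one callback; react only when the backlog grows too fast. */
	static void enqueue_one(void)
	{
		qlen++;
		if (qlen > qlen_last_fqs_check + QHIMARK) {
			printf("forcing quiescent states at qlen=%ld\n", qlen);
			qlen_last_fqs_check = qlen;
		}
	}

	int main(void)
	{
		for (int i = 0; i < 25000; i++)
			enqueue_one();
		return 0;
	}
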
@@ -1583,7 +1994,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 		 * or RCU-bh, force a local reschedule.
 		 */
 		rdp->n_rp_qs_pending++;
-		if (!rdp->preemptable &&
+		if (!rdp->preemptible &&
 		    ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1,
 				 jiffies))
 			set_need_resched();
@@ -1760,7 +2171,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 * that this CPU cannot possibly have any RCU callbacks in flight yet.
 */
 static void __cpuinit
-rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
+rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
 {
 	unsigned long flags;
 	unsigned long mask;
@@ -1772,7 +2183,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
 	rdp->passed_quiesc = 0; /* We could be racing with new GP, */
 	rdp->qs_pending = 1;	/* so set up to respond to current GP. */
 	rdp->beenonline = 1;	/* We have now been online. */
-	rdp->preemptable = preemptable;
+	rdp->preemptible = preemptible;
 	rdp->qlen_last_fqs_check = 0;
 	rdp->n_force_qs_snap = rsp->n_force_qs;
 	rdp->blimit = blimit;
@@ -1813,6 +2224,19 @@ static void __cpuinit rcu_online_cpu(int cpu)
 	rcu_preempt_init_percpu_data(cpu);
 }
 
+static void __cpuinit rcu_online_kthreads(int cpu)
+{
+	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
+	struct rcu_node *rnp = rdp->mynode;
+
+	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
+	if (rcu_kthreads_spawnable) {
+		(void)rcu_spawn_one_cpu_kthread(cpu);
+		if (rnp->node_kthread_task == NULL)
+			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+	}
+}
+
 /*
  * Handle CPU online/offline notification events.
  */
@@ -1820,11 +2244,23 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
 				    unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
+	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
+	struct rcu_node *rnp = rdp->mynode;
 
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
 		rcu_online_cpu(cpu);
+		rcu_online_kthreads(cpu);
+		break;
+	case CPU_ONLINE:
+	case CPU_DOWN_FAILED:
+		rcu_node_kthread_setaffinity(rnp, -1);
+		rcu_cpu_kthread_setrt(cpu, 1);
+		break;
+	case CPU_DOWN_PREPARE:
+		rcu_node_kthread_setaffinity(rnp, cpu);
+		rcu_cpu_kthread_setrt(cpu, 0);
 		break;
 	case CPU_DYING:
 	case CPU_DYING_FROZEN:
@@ -1943,10 +2379,7 @@ static void __init rcu_init_one(struct rcu_state *rsp,
 				      j / rsp->levelspread[i - 1];
 		}
 		rnp->level = i;
-		INIT_LIST_HEAD(&rnp->blocked_tasks[0]);
-		INIT_LIST_HEAD(&rnp->blocked_tasks[1]);
-		INIT_LIST_HEAD(&rnp->blocked_tasks[2]);
-		INIT_LIST_HEAD(&rnp->blocked_tasks[3]);
+		INIT_LIST_HEAD(&rnp->blkd_tasks);
 	}
 }
 
@@ -1968,7 +2401,6 @@ void __init rcu_init(void)
 	rcu_init_one(&rcu_sched_state, &rcu_sched_data);
 	rcu_init_one(&rcu_bh_state, &rcu_bh_data);
 	__rcu_init_preempt();
-	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 
 	/*
 	 * We don't need protection against CPU-hotplug here because