Diffstat (limited to 'kernel/rcutree.c')
 -rw-r--r--  kernel/rcutree.c | 527
 1 file changed, 480 insertions, 47 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index dd4aea806f8e..f07d2f03181a 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -47,6 +47,9 @@
 #include <linux/mutex.h>
 #include <linux/time.h>
 #include <linux/kernel_stat.h>
+#include <linux/wait.h>
+#include <linux/kthread.h>
+#include <linux/prefetch.h>
 
 #include "rcutree.h"
 
@@ -79,10 +82,41 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
 struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
+static struct rcu_state *rcu_state;
+
 int rcu_scheduler_active __read_mostly;
 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 
 /*
+ * Control variables for per-CPU and per-rcu_node kthreads. These
+ * handle all flavors of RCU.
+ */
+static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
+DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
+DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
+DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
+static DEFINE_PER_CPU(wait_queue_head_t, rcu_cpu_wq);
+DEFINE_PER_CPU(char, rcu_cpu_has_work);
+static char rcu_kthreads_spawnable;
+
+static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
+static void invoke_rcu_cpu_kthread(void);
+
+#define RCU_KTHREAD_PRIO 1 /* RT priority for per-CPU kthreads. */
+
+/*
+ * Track the rcutorture test sequence number and the update version
+ * number within a given test. The rcutorture_testseq is incremented
+ * on every rcutorture module load and unload, so has an odd value
+ * when a test is running. The rcutorture_vernum is set to zero
+ * when rcutorture starts and is incremented on each rcutorture update.
+ * These variables enable correlating rcutorture output with the
+ * RCU tracing information.
+ */
+unsigned long rcutorture_testseq;
+unsigned long rcutorture_vernum;
+
+/*
  * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
  * permit this function to be invoked without holding the root rcu_node
  * structure's ->lock, but of course results can be subject to change.
@@ -124,6 +158,7 @@ void rcu_note_context_switch(int cpu)
 	rcu_sched_qs(cpu);
 	rcu_preempt_note_context_switch(cpu);
 }
+EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 
 #ifdef CONFIG_NO_HZ
 DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
@@ -140,10 +175,8 @@ module_param(blimit, int, 0);
 module_param(qhimark, int, 0);
 module_param(qlowmark, int, 0);
 
-#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-int rcu_cpu_stall_suppress __read_mostly = RCU_CPU_STALL_SUPPRESS_INIT;
+int rcu_cpu_stall_suppress __read_mostly;
 module_param(rcu_cpu_stall_suppress, int, 0644);
-#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
 
 static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
 static int rcu_pending(int cpu);
@@ -176,6 +209,31 @@ void rcu_bh_force_quiescent_state(void)
 EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
 
 /*
+ * Record the number of times rcutorture tests have been initiated and
+ * terminated. This information allows the debugfs tracing stats to be
+ * correlated to the rcutorture messages, even when the rcutorture module
+ * is being repeatedly loaded and unloaded. In other words, we cannot
+ * store this state in rcutorture itself.
+ */
+void rcutorture_record_test_transition(void)
+{
+	rcutorture_testseq++;
+	rcutorture_vernum = 0;
+}
+EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);
+
+/*
+ * Record the number of writer passes through the current rcutorture test.
+ * This is also used to correlate debugfs tracing stats with the rcutorture
+ * messages.
+ */
+void rcutorture_record_progress(unsigned long vernum)
+{
+	rcutorture_vernum++;
+}
+EXPORT_SYMBOL_GPL(rcutorture_record_progress);
+
+/*
  * Force a quiescent state for RCU-sched.
  */
 void rcu_sched_force_quiescent_state(void)
@@ -234,8 +292,8 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp)
 		return 1;
 	}
 
-	/* If preemptable RCU, no point in sending reschedule IPI. */
-	if (rdp->preemptable)
+	/* If preemptible RCU, no point in sending reschedule IPI. */
+	if (rdp->preemptible)
 		return 0;
 
 	/* The CPU is online, so send it a reschedule IPI. */
@@ -450,8 +508,6 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 
 #endif /* #else #ifdef CONFIG_NO_HZ */
 
-#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-
 int rcu_cpu_stall_suppress __read_mostly;
 
 static void record_gp_stall_check_time(struct rcu_state *rsp)
@@ -537,21 +593,24 @@ static void print_cpu_stall(struct rcu_state *rsp)
 
 static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-	long delta;
+	unsigned long j;
+	unsigned long js;
 	struct rcu_node *rnp;
 
 	if (rcu_cpu_stall_suppress)
 		return;
-	delta = jiffies - ACCESS_ONCE(rsp->jiffies_stall);
+	j = ACCESS_ONCE(jiffies);
+	js = ACCESS_ONCE(rsp->jiffies_stall);
 	rnp = rdp->mynode;
-	if ((ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && delta >= 0) {
+	if ((ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && ULONG_CMP_GE(j, js)) {
 
 		/* We haven't checked in, so go dump stack. */
 		print_cpu_stall(rsp);
 
-	} else if (rcu_gp_in_progress(rsp) && delta >= RCU_STALL_RAT_DELAY) {
+	} else if (rcu_gp_in_progress(rsp) &&
+		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
 
-		/* They had two time units to dump stack, so complain. */
+		/* They had a few time units to dump stack, so complain. */
 		print_other_cpu_stall(rsp);
 	}
 }
@@ -587,26 +646,6 @@ static void __init check_cpu_stall_init(void)
 	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
 }
 
-#else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-
-static void record_gp_stall_check_time(struct rcu_state *rsp)
-{
-}
-
-static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
-{
-}
-
-void rcu_cpu_stall_reset(void)
-{
-}
-
-static void __init check_cpu_stall_init(void)
-{
-}
-
-#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-
 /*
  * Update CPU-local rcu_data state to record the newly noticed grace period.
  * This is used both when we started the grace period and when we notice
@@ -809,6 +848,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 		rnp->completed = rsp->completed;
 		rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
 		rcu_start_gp_per_cpu(rsp, rnp, rdp);
+		rcu_preempt_boost_start_gp(rnp);
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		return;
 	}
@@ -844,6 +884,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 		rnp->completed = rsp->completed;
 		if (rnp == rdp->mynode)
 			rcu_start_gp_per_cpu(rsp, rnp, rdp);
+		rcu_preempt_boost_start_gp(rnp);
 		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
 	}
 
@@ -864,7 +905,12 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
 	__releases(rcu_get_root(rsp)->lock)
 {
+	unsigned long gp_duration;
+
 	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
+	gp_duration = jiffies - rsp->gp_start;
+	if (gp_duration > rsp->gp_max)
+		rsp->gp_max = gp_duration;
 	rsp->completed = rsp->gpnum;
 	rsp->signaled = RCU_GP_IDLE;
 	rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
@@ -894,7 +940,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 			return;
 		}
 		rnp->qsmask &= ~mask;
-		if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {
+		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
 
 			/* Other bits still set at this level, so done. */
 			raw_spin_unlock_irqrestore(&rnp->lock, flags);
@@ -1037,6 +1083,8 @@ static void rcu_send_cbs_to_online(struct rcu_state *rsp)
 /*
  * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy
  * and move all callbacks from the outgoing CPU to the current one.
+ * There can only be one CPU hotplug operation at a time, so no other
+ * CPU can be attempting to update rcu_cpu_kthread_task.
  */
 static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 {
@@ -1045,6 +1093,14 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 	int need_report = 0;
 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
 	struct rcu_node *rnp;
+	struct task_struct *t;
+
+	/* Stop the CPU's kthread. */
+	t = per_cpu(rcu_cpu_kthread_task, cpu);
+	if (t != NULL) {
+		per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
+		kthread_stop(t);
+	}
 
 	/* Exclude any attempts to start a new grace period. */
 	raw_spin_lock_irqsave(&rsp->onofflock, flags);
@@ -1082,6 +1138,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	if (need_report & RCU_OFL_TASKS_EXP_GP)
 		rcu_report_exp_rnp(rsp, rnp);
+	rcu_node_kthread_setaffinity(rnp, -1);
 }
 
 /*
@@ -1143,7 +1200,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 		next = list->next;
 		prefetch(next);
 		debug_rcu_head_unqueue(list);
-		list->func(list);
+		__rcu_reclaim(list);
 		list = next;
 		if (++count >= rdp->blimit)
 			break;
@@ -1179,7 +1236,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 
 	/* Re-raise the RCU softirq if there are callbacks remaining. */
 	if (cpu_has_callbacks_ready_to_invoke(rdp))
-		raise_softirq(RCU_SOFTIRQ);
+		invoke_rcu_cpu_kthread();
 }
 
 /*
@@ -1225,7 +1282,7 @@ void rcu_check_callbacks(int cpu, int user)
 	}
 	rcu_preempt_check_callbacks(cpu);
 	if (rcu_pending(cpu))
-		raise_softirq(RCU_SOFTIRQ);
+		invoke_rcu_cpu_kthread();
 }
 
 #ifdef CONFIG_SMP
@@ -1233,6 +1290,8 @@ void rcu_check_callbacks(int cpu, int user)
 /*
  * Scan the leaf rcu_node structures, processing dyntick state for any that
  * have not yet encountered a quiescent state, using the function specified.
+ * Also initiate boosting for any threads blocked on the root rcu_node.
+ *
  * The caller must have suppressed start of new grace periods.
  */
 static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
@@ -1251,7 +1310,7 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
 			return;
 		}
 		if (rnp->qsmask == 0) {
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
 			continue;
 		}
 		cpu = rnp->grplo;
@@ -1269,6 +1328,11 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
 		}
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	}
+	rnp = rcu_get_root(rsp);
+	if (rnp->qsmask == 0) {
+		raw_spin_lock_irqsave(&rnp->lock, flags);
+		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
+	}
 }
 
 /*
@@ -1389,7 +1453,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 /*
  * Do softirq processing for the current CPU.
  */
-static void rcu_process_callbacks(struct softirq_action *unused)
+static void rcu_process_callbacks(void)
 {
 	/*
 	 * Memory references from any prior RCU read-side critical sections
@@ -1414,6 +1478,347 @@ static void rcu_process_callbacks(struct softirq_action *unused)
 	rcu_needs_cpu_flush();
 }
 
+/*
+ * Wake up the current CPU's kthread. This replaces raise_softirq()
+ * in earlier versions of RCU. Note that because we are running on
+ * the current CPU with interrupts disabled, the rcu_cpu_kthread_task
+ * cannot disappear out from under us.
+ */
+static void invoke_rcu_cpu_kthread(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__this_cpu_write(rcu_cpu_has_work, 1);
+	if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
+		local_irq_restore(flags);
+		return;
+	}
+	wake_up(&__get_cpu_var(rcu_cpu_wq));
+	local_irq_restore(flags);
+}
+
+/*
+ * Wake up the specified per-rcu_node-structure kthread.
+ * Because the per-rcu_node kthreads are immortal, we don't need
+ * to do anything to keep them alive.
+ */
+static void invoke_rcu_node_kthread(struct rcu_node *rnp)
+{
+	struct task_struct *t;
+
+	t = rnp->node_kthread_task;
+	if (t != NULL)
+		wake_up_process(t);
+}
+
+/*
+ * Set the specified CPU's kthread to run RT or not, as specified by
+ * the to_rt argument. The CPU-hotplug locks are held, so the task
+ * is not going away.
+ */
+static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
+{
+	int policy;
+	struct sched_param sp;
+	struct task_struct *t;
+
+	t = per_cpu(rcu_cpu_kthread_task, cpu);
+	if (t == NULL)
+		return;
+	if (to_rt) {
+		policy = SCHED_FIFO;
+		sp.sched_priority = RCU_KTHREAD_PRIO;
+	} else {
+		policy = SCHED_NORMAL;
+		sp.sched_priority = 0;
+	}
+	sched_setscheduler_nocheck(t, policy, &sp);
+}
+
+/*
+ * Timer handler to initiate the waking up of per-CPU kthreads that
+ * have yielded the CPU due to excess numbers of RCU callbacks.
+ * We wake up the per-rcu_node kthread, which in turn will wake up
+ * the booster kthread.
+ */
+static void rcu_cpu_kthread_timer(unsigned long arg)
+{
+	unsigned long flags;
+	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
+	struct rcu_node *rnp = rdp->mynode;
+
+	raw_spin_lock_irqsave(&rnp->lock, flags);
+	rnp->wakemask |= rdp->grpmask;
+	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	invoke_rcu_node_kthread(rnp);
+}
+
+/*
+ * Drop to non-real-time priority and yield, but only after posting a
+ * timer that will cause us to regain our real-time priority if we
+ * remain preempted. Either way, we restore our real-time priority
+ * before returning.
+ */
+static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
+{
+	struct sched_param sp;
+	struct timer_list yield_timer;
+
+	setup_timer_on_stack(&yield_timer, f, arg);
+	mod_timer(&yield_timer, jiffies + 2);
+	sp.sched_priority = 0;
+	sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
+	set_user_nice(current, 19);
+	schedule();
+	sp.sched_priority = RCU_KTHREAD_PRIO;
+	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
+	del_timer(&yield_timer);
+}
+
+/*
+ * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
+ * This can happen while the corresponding CPU is either coming online
+ * or going offline. We cannot wait until the CPU is fully online
+ * before starting the kthread, because the various notifier functions
+ * can wait for RCU grace periods. So we park rcu_cpu_kthread() until
+ * the corresponding CPU is online.
+ *
+ * Return 1 if the kthread needs to stop, 0 otherwise.
+ *
+ * Caller must disable bh. This function can momentarily enable it.
+ */
+static int rcu_cpu_kthread_should_stop(int cpu)
+{
+	while (cpu_is_offline(cpu) ||
+	       !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
+	       smp_processor_id() != cpu) {
+		if (kthread_should_stop())
+			return 1;
+		per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
+		per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
+		local_bh_enable();
+		schedule_timeout_uninterruptible(1);
+		if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
+			set_cpus_allowed_ptr(current, cpumask_of(cpu));
+		local_bh_disable();
+	}
+	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
+	return 0;
+}
+
+/*
+ * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
+ * earlier RCU softirq.
+ */
+static int rcu_cpu_kthread(void *arg)
+{
+	int cpu = (int)(long)arg;
+	unsigned long flags;
+	int spincnt = 0;
+	unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
+	wait_queue_head_t *wqp = &per_cpu(rcu_cpu_wq, cpu);
+	char work;
+	char *workp = &per_cpu(rcu_cpu_has_work, cpu);
+
+	for (;;) {
+		*statusp = RCU_KTHREAD_WAITING;
+		wait_event_interruptible(*wqp,
+					 *workp != 0 || kthread_should_stop());
+		local_bh_disable();
+		if (rcu_cpu_kthread_should_stop(cpu)) {
+			local_bh_enable();
+			break;
+		}
+		*statusp = RCU_KTHREAD_RUNNING;
+		per_cpu(rcu_cpu_kthread_loops, cpu)++;
+		local_irq_save(flags);
+		work = *workp;
+		*workp = 0;
+		local_irq_restore(flags);
+		if (work)
+			rcu_process_callbacks();
+		local_bh_enable();
+		if (*workp != 0)
+			spincnt++;
+		else
+			spincnt = 0;
+		if (spincnt > 10) {
+			*statusp = RCU_KTHREAD_YIELDING;
+			rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
+			spincnt = 0;
+		}
+	}
+	*statusp = RCU_KTHREAD_STOPPED;
+	return 0;
+}
+
+/*
+ * Spawn a per-CPU kthread, setting up affinity and priority.
+ * Because the CPU hotplug lock is held, no other CPU will be attempting
+ * to manipulate rcu_cpu_kthread_task. There might be another CPU
+ * attempting to access it during boot, but the locking in kthread_bind()
+ * will enforce sufficient ordering.
+ */
+static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
+{
+	struct sched_param sp;
+	struct task_struct *t;
+
+	if (!rcu_kthreads_spawnable ||
+	    per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
+		return 0;
+	t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
+	if (IS_ERR(t))
+		return PTR_ERR(t);
+	kthread_bind(t, cpu);
+	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
+	WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
+	per_cpu(rcu_cpu_kthread_task, cpu) = t;
+	wake_up_process(t);
+	sp.sched_priority = RCU_KTHREAD_PRIO;
+	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+	return 0;
+}
+
+/*
+ * Per-rcu_node kthread, which is in charge of waking up the per-CPU
+ * kthreads when needed. We ignore requests to wake up kthreads
+ * for offline CPUs, which is OK because force_quiescent_state()
+ * takes care of this case.
+ */
+static int rcu_node_kthread(void *arg)
+{
+	int cpu;
+	unsigned long flags;
+	unsigned long mask;
+	struct rcu_node *rnp = (struct rcu_node *)arg;
+	struct sched_param sp;
+	struct task_struct *t;
+
+	for (;;) {
+		rnp->node_kthread_status = RCU_KTHREAD_WAITING;
+		wait_event_interruptible(rnp->node_wq, rnp->wakemask != 0);
+		rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
+		raw_spin_lock_irqsave(&rnp->lock, flags);
+		mask = rnp->wakemask;
+		rnp->wakemask = 0;
+		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
+		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
+			if ((mask & 0x1) == 0)
+				continue;
+			preempt_disable();
+			t = per_cpu(rcu_cpu_kthread_task, cpu);
+			if (!cpu_online(cpu) || t == NULL) {
+				preempt_enable();
+				continue;
+			}
+			per_cpu(rcu_cpu_has_work, cpu) = 1;
+			sp.sched_priority = RCU_KTHREAD_PRIO;
+			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+			preempt_enable();
+		}
+	}
+	/* NOTREACHED */
+	rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
+	return 0;
+}
+
+/*
+ * Set the per-rcu_node kthread's affinity to cover all CPUs that are
+ * served by the rcu_node in question. The CPU hotplug lock is still
+ * held, so the value of rnp->qsmaskinit will be stable.
+ *
+ * We don't include outgoingcpu in the affinity set, use -1 if there is
+ * no outgoing CPU. If there are no CPUs left in the affinity set,
+ * this function allows the kthread to execute on any CPU.
+ */
+static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+{
+	cpumask_var_t cm;
+	int cpu;
+	unsigned long mask = rnp->qsmaskinit;
+
+	if (rnp->node_kthread_task == NULL)
+		return;
+	if (!alloc_cpumask_var(&cm, GFP_KERNEL))
+		return;
+	cpumask_clear(cm);
+	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
+		if ((mask & 0x1) && cpu != outgoingcpu)
+			cpumask_set_cpu(cpu, cm);
+	if (cpumask_weight(cm) == 0) {
+		cpumask_setall(cm);
+		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
+			cpumask_clear_cpu(cpu, cm);
+		WARN_ON_ONCE(cpumask_weight(cm) == 0);
+	}
+	set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
+	rcu_boost_kthread_setaffinity(rnp, cm);
+	free_cpumask_var(cm);
+}
+
+/*
+ * Spawn a per-rcu_node kthread, setting priority and affinity.
+ * Called during boot before online/offline can happen, or, if
+ * during runtime, with the main CPU-hotplug locks held. So only
+ * one of these can be executing at a time.
+ */
+static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
+						struct rcu_node *rnp)
+{
+	unsigned long flags;
+	int rnp_index = rnp - &rsp->node[0];
+	struct sched_param sp;
+	struct task_struct *t;
+
+	if (!rcu_kthreads_spawnable ||
+	    rnp->qsmaskinit == 0)
+		return 0;
+	if (rnp->node_kthread_task == NULL) {
+		t = kthread_create(rcu_node_kthread, (void *)rnp,
+				   "rcun%d", rnp_index);
+		if (IS_ERR(t))
+			return PTR_ERR(t);
+		raw_spin_lock_irqsave(&rnp->lock, flags);
+		rnp->node_kthread_task = t;
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		wake_up_process(t);
+		sp.sched_priority = 99;
+		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+	}
+	return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
+}
+
+/*
+ * Spawn all kthreads -- called as soon as the scheduler is running.
+ */
+static int __init rcu_spawn_kthreads(void)
+{
+	int cpu;
+	struct rcu_node *rnp;
+
+	rcu_kthreads_spawnable = 1;
+	for_each_possible_cpu(cpu) {
+		init_waitqueue_head(&per_cpu(rcu_cpu_wq, cpu));
+		per_cpu(rcu_cpu_has_work, cpu) = 0;
+		if (cpu_online(cpu))
+			(void)rcu_spawn_one_cpu_kthread(cpu);
+	}
+	rnp = rcu_get_root(rcu_state);
+	init_waitqueue_head(&rnp->node_wq);
+	rcu_init_boost_waitqueue(rnp);
+	(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+	if (NUM_RCU_NODES > 1)
+		rcu_for_each_leaf_node(rcu_state, rnp) {
+			init_waitqueue_head(&rnp->node_wq);
+			rcu_init_boost_waitqueue(rnp);
+			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+		}
+	return 0;
+}
+early_initcall(rcu_spawn_kthreads);
+
 static void
 __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	   struct rcu_state *rsp)
@@ -1439,6 +1844,13 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	/* Add the callback to our list. */
 	*rdp->nxttail[RCU_NEXT_TAIL] = head;
 	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
+	rdp->qlen++;
+
+	/* If interrupts were disabled, don't dive into RCU core. */
+	if (irqs_disabled_flags(flags)) {
+		local_irq_restore(flags);
+		return;
+	}
 
 	/*
 	 * Force the grace period if too many callbacks or too long waiting.
@@ -1447,7 +1859,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	 * invoking force_quiescent_state() if the newly enqueued callback
 	 * is the only one waiting for a grace period to complete.
 	 */
-	if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
+	if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
 
 		/* Are we ignoring a completed grace period? */
 		rcu_process_gp_end(rsp, rdp);
@@ -1583,7 +1995,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 	 * or RCU-bh, force a local reschedule.
 	 */
 	rdp->n_rp_qs_pending++;
-	if (!rdp->preemptable &&
+	if (!rdp->preemptible &&
 	    ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1,
 			 jiffies))
 		set_need_resched();
@@ -1760,7 +2172,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
  * that this CPU cannot possibly have any RCU callbacks in flight yet.
  */
 static void __cpuinit
-rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
+rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
 {
 	unsigned long flags;
 	unsigned long mask;
@@ -1772,7 +2184,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
 	rdp->passed_quiesc = 0; /* We could be racing with new GP, */
 	rdp->qs_pending = 1; /* so set up to respond to current GP. */
 	rdp->beenonline = 1; /* We have now been online. */
-	rdp->preemptable = preemptable;
+	rdp->preemptible = preemptible;
 	rdp->qlen_last_fqs_check = 0;
 	rdp->n_force_qs_snap = rsp->n_force_qs;
 	rdp->blimit = blimit;
@@ -1813,6 +2225,19 @@ static void __cpuinit rcu_online_cpu(int cpu)
 	rcu_preempt_init_percpu_data(cpu);
 }
 
+static void __cpuinit rcu_online_kthreads(int cpu)
+{
+	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
+	struct rcu_node *rnp = rdp->mynode;
+
+	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
+	if (rcu_kthreads_spawnable) {
+		(void)rcu_spawn_one_cpu_kthread(cpu);
+		if (rnp->node_kthread_task == NULL)
+			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+	}
+}
+
 /*
  * Handle CPU online/offline notification events.
  */
@@ -1820,11 +2245,23 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
 				    unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
+	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
+	struct rcu_node *rnp = rdp->mynode;
 
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
 		rcu_online_cpu(cpu);
+		rcu_online_kthreads(cpu);
+		break;
+	case CPU_ONLINE:
+	case CPU_DOWN_FAILED:
+		rcu_node_kthread_setaffinity(rnp, -1);
+		rcu_cpu_kthread_setrt(cpu, 1);
+		break;
+	case CPU_DOWN_PREPARE:
+		rcu_node_kthread_setaffinity(rnp, cpu);
+		rcu_cpu_kthread_setrt(cpu, 0);
 		break;
 	case CPU_DYING:
 	case CPU_DYING_FROZEN:
@@ -1943,10 +2380,7 @@ static void __init rcu_init_one(struct rcu_state *rsp,
 				      j / rsp->levelspread[i - 1];
 		}
 		rnp->level = i;
-		INIT_LIST_HEAD(&rnp->blocked_tasks[0]);
-		INIT_LIST_HEAD(&rnp->blocked_tasks[1]);
-		INIT_LIST_HEAD(&rnp->blocked_tasks[2]);
-		INIT_LIST_HEAD(&rnp->blocked_tasks[3]);
+		INIT_LIST_HEAD(&rnp->blkd_tasks);
 	}
 }
 
@@ -1968,7 +2402,6 @@ void __init rcu_init(void)
 	rcu_init_one(&rcu_sched_state, &rcu_sched_data);
 	rcu_init_one(&rcu_bh_state, &rcu_bh_data);
 	__rcu_init_preempt();
-	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 
 	/*
 	 * We don't need protection against CPU-hotplug here because