Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--  kernel/rcutree.c | 1051
1 file changed, 711 insertions, 340 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 97ce31579ec0..53ae9598f798 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -25,7 +25,7 @@ | |||
25 | * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. | 25 | * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. |
26 | * | 26 | * |
27 | * For detailed explanation of Read-Copy Update mechanism see - | 27 | * For detailed explanation of Read-Copy Update mechanism see - |
28 | * Documentation/RCU | 28 | * Documentation/RCU |
29 | */ | 29 | */ |
30 | #include <linux/types.h> | 30 | #include <linux/types.h> |
31 | #include <linux/kernel.h> | 31 | #include <linux/kernel.h> |
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/rcupdate.h> | 35 | #include <linux/rcupdate.h> |
36 | #include <linux/interrupt.h> | 36 | #include <linux/interrupt.h> |
37 | #include <linux/sched.h> | 37 | #include <linux/sched.h> |
38 | #include <linux/nmi.h> | ||
38 | #include <asm/atomic.h> | 39 | #include <asm/atomic.h> |
39 | #include <linux/bitops.h> | 40 | #include <linux/bitops.h> |
40 | #include <linux/module.h> | 41 | #include <linux/module.h> |
@@ -45,39 +46,80 @@ | |||
45 | #include <linux/cpu.h> | 46 | #include <linux/cpu.h> |
46 | #include <linux/mutex.h> | 47 | #include <linux/mutex.h> |
47 | #include <linux/time.h> | 48 | #include <linux/time.h> |
49 | #include <linux/kernel_stat.h> | ||
48 | 50 | ||
49 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 51 | #include "rcutree.h" |
50 | static struct lock_class_key rcu_lock_key; | ||
51 | struct lockdep_map rcu_lock_map = | ||
52 | STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key); | ||
53 | EXPORT_SYMBOL_GPL(rcu_lock_map); | ||
54 | #endif | ||
55 | 52 | ||
56 | /* Data structures. */ | 53 | /* Data structures. */ |
57 | 54 | ||
55 | static struct lock_class_key rcu_node_class[NUM_RCU_LVLS]; | ||
56 | |||
58 | #define RCU_STATE_INITIALIZER(name) { \ | 57 | #define RCU_STATE_INITIALIZER(name) { \ |
59 | .level = { &name.node[0] }, \ | 58 | .level = { &name.node[0] }, \ |
60 | .levelcnt = { \ | 59 | .levelcnt = { \ |
61 | NUM_RCU_LVL_0, /* root of hierarchy. */ \ | 60 | NUM_RCU_LVL_0, /* root of hierarchy. */ \ |
62 | NUM_RCU_LVL_1, \ | 61 | NUM_RCU_LVL_1, \ |
63 | NUM_RCU_LVL_2, \ | 62 | NUM_RCU_LVL_2, \ |
64 | NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \ | 63 | NUM_RCU_LVL_3, \ |
64 | NUM_RCU_LVL_4, /* == MAX_RCU_LVLS */ \ | ||
65 | }, \ | 65 | }, \ |
66 | .signaled = RCU_SIGNAL_INIT, \ | 66 | .signaled = RCU_GP_IDLE, \ |
67 | .gpnum = -300, \ | 67 | .gpnum = -300, \ |
68 | .completed = -300, \ | 68 | .completed = -300, \ |
69 | .onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \ | 69 | .onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \ |
70 | .orphan_cbs_list = NULL, \ | ||
71 | .orphan_cbs_tail = &name.orphan_cbs_list, \ | ||
72 | .orphan_qlen = 0, \ | ||
70 | .fqslock = __SPIN_LOCK_UNLOCKED(&name.fqslock), \ | 73 | .fqslock = __SPIN_LOCK_UNLOCKED(&name.fqslock), \ |
71 | .n_force_qs = 0, \ | 74 | .n_force_qs = 0, \ |
72 | .n_force_qs_ngp = 0, \ | 75 | .n_force_qs_ngp = 0, \ |
73 | } | 76 | } |
74 | 77 | ||
75 | struct rcu_state rcu_state = RCU_STATE_INITIALIZER(rcu_state); | 78 | struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched_state); |
76 | DEFINE_PER_CPU(struct rcu_data, rcu_data); | 79 | DEFINE_PER_CPU(struct rcu_data, rcu_sched_data); |
77 | 80 | ||
78 | struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); | 81 | struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); |
79 | DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); | 82 | DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); |
80 | 83 | ||
84 | static int rcu_scheduler_active __read_mostly; | ||
85 | |||
86 | |||
87 | /* | ||
88 | * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s | ||
89 | * permit this function to be invoked without holding the root rcu_node | ||
90 | * structure's ->lock, but of course results can be subject to change. | ||
91 | */ | ||
92 | static int rcu_gp_in_progress(struct rcu_state *rsp) | ||
93 | { | ||
94 | return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum); | ||
95 | } | ||
96 | |||
97 | /* | ||
98 | * Note a quiescent state. Because we do not need to know | ||
99 | * how many quiescent states passed, just if there was at least | ||
100 | * one since the start of the grace period, this just sets a flag. | ||
101 | */ | ||
102 | void rcu_sched_qs(int cpu) | ||
103 | { | ||
104 | struct rcu_data *rdp; | ||
105 | |||
106 | rdp = &per_cpu(rcu_sched_data, cpu); | ||
107 | rdp->passed_quiesc_completed = rdp->gpnum - 1; | ||
108 | barrier(); | ||
109 | rdp->passed_quiesc = 1; | ||
110 | rcu_preempt_note_context_switch(cpu); | ||
111 | } | ||
112 | |||
113 | void rcu_bh_qs(int cpu) | ||
114 | { | ||
115 | struct rcu_data *rdp; | ||
116 | |||
117 | rdp = &per_cpu(rcu_bh_data, cpu); | ||
118 | rdp->passed_quiesc_completed = rdp->gpnum - 1; | ||
119 | barrier(); | ||
120 | rdp->passed_quiesc = 1; | ||
121 | } | ||
122 | |||
81 | #ifdef CONFIG_NO_HZ | 123 | #ifdef CONFIG_NO_HZ |
82 | DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { | 124 | DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { |
83 | .dynticks_nesting = 1, | 125 | .dynticks_nesting = 1, |
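For orientation, the rcu_gp_in_progress() helper added above captures the file's central bookkeeping: ->gpnum counts grace periods started and ->completed counts grace periods finished, so the two differ exactly while a grace period is in flight, and each CPU only has to flag that it passed through a quiescent state since the current period began. A minimal user-space model of that counter pair (illustrative names, not kernel code) is:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model of the ->gpnum/->completed pair behind rcu_gp_in_progress(). */
    struct gp_state {
        long gpnum;      /* grace periods started */
        long completed;  /* grace periods finished */
    };

    static bool gp_in_progress(const struct gp_state *s)
    {
        /* A period is in flight when starts have outrun completions. */
        return s->completed != s->gpnum;
    }

    int main(void)
    {
        /* Same "-300" trick as RCU_STATE_INITIALIZER: both counters equal. */
        struct gp_state s = { .gpnum = -300, .completed = -300 };

        printf("%d\n", gp_in_progress(&s));  /* 0: idle */
        s.gpnum++;                           /* rcu_start_gp() analogue */
        printf("%d\n", gp_in_progress(&s));  /* 1: started, not yet finished */
        s.completed = s.gpnum;               /* rcu_report_qs_rsp() analogue */
        printf("%d\n", gp_in_progress(&s));  /* 0: idle again */
        return 0;
    }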
@@ -89,16 +131,21 @@ static int blimit = 10; /* Maximum callbacks per softirq. */ | |||
89 | static int qhimark = 10000; /* If this many pending, ignore blimit. */ | 131 | static int qhimark = 10000; /* If this many pending, ignore blimit. */ |
90 | static int qlowmark = 100; /* Once only this many pending, use blimit. */ | 132 | static int qlowmark = 100; /* Once only this many pending, use blimit. */ |
91 | 133 | ||
134 | module_param(blimit, int, 0); | ||
135 | module_param(qhimark, int, 0); | ||
136 | module_param(qlowmark, int, 0); | ||
137 | |||
92 | static void force_quiescent_state(struct rcu_state *rsp, int relaxed); | 138 | static void force_quiescent_state(struct rcu_state *rsp, int relaxed); |
139 | static int rcu_pending(int cpu); | ||
93 | 140 | ||
94 | /* | 141 | /* |
95 | * Return the number of RCU batches processed thus far for debug & stats. | 142 | * Return the number of RCU-sched batches processed thus far for debug & stats. |
96 | */ | 143 | */ |
97 | long rcu_batches_completed(void) | 144 | long rcu_batches_completed_sched(void) |
98 | { | 145 | { |
99 | return rcu_state.completed; | 146 | return rcu_sched_state.completed; |
100 | } | 147 | } |
101 | EXPORT_SYMBOL_GPL(rcu_batches_completed); | 148 | EXPORT_SYMBOL_GPL(rcu_batches_completed_sched); |
102 | 149 | ||
103 | /* | 150 | /* |
104 | * Return the number of RCU BH batches processed thus far for debug & stats. | 151 | * Return the number of RCU BH batches processed thus far for debug & stats. |
@@ -124,9 +171,7 @@ cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp) | |||
124 | static int | 171 | static int |
125 | cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp) | 172 | cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp) |
126 | { | 173 | { |
127 | /* ACCESS_ONCE() because we are accessing outside of lock. */ | 174 | return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp); |
128 | return *rdp->nxttail[RCU_DONE_TAIL] && | ||
129 | ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum); | ||
130 | } | 175 | } |
131 | 176 | ||
132 | /* | 177 | /* |
@@ -161,6 +206,10 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp) | |||
161 | return 1; | 206 | return 1; |
162 | } | 207 | } |
163 | 208 | ||
209 | /* If preemptable RCU, no point in sending reschedule IPI. */ | ||
210 | if (rdp->preemptable) | ||
211 | return 0; | ||
212 | |||
164 | /* The CPU is online, so send it a reschedule IPI. */ | 213 | /* The CPU is online, so send it a reschedule IPI. */ |
165 | if (rdp->cpu != smp_processor_id()) | 214 | if (rdp->cpu != smp_processor_id()) |
166 | smp_send_reschedule(rdp->cpu); | 215 | smp_send_reschedule(rdp->cpu); |
@@ -173,7 +222,6 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp) | |||
173 | #endif /* #ifdef CONFIG_SMP */ | 222 | #endif /* #ifdef CONFIG_SMP */ |
174 | 223 | ||
175 | #ifdef CONFIG_NO_HZ | 224 | #ifdef CONFIG_NO_HZ |
176 | static DEFINE_RATELIMIT_STATE(rcu_rs, 10 * HZ, 5); | ||
177 | 225 | ||
178 | /** | 226 | /** |
179 | * rcu_enter_nohz - inform RCU that current CPU is entering nohz | 227 | * rcu_enter_nohz - inform RCU that current CPU is entering nohz |
@@ -193,7 +241,7 @@ void rcu_enter_nohz(void) | |||
193 | rdtp = &__get_cpu_var(rcu_dynticks); | 241 | rdtp = &__get_cpu_var(rcu_dynticks); |
194 | rdtp->dynticks++; | 242 | rdtp->dynticks++; |
195 | rdtp->dynticks_nesting--; | 243 | rdtp->dynticks_nesting--; |
196 | WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs); | 244 | WARN_ON_ONCE(rdtp->dynticks & 0x1); |
197 | local_irq_restore(flags); | 245 | local_irq_restore(flags); |
198 | } | 246 | } |
199 | 247 | ||
@@ -212,7 +260,7 @@ void rcu_exit_nohz(void) | |||
212 | rdtp = &__get_cpu_var(rcu_dynticks); | 260 | rdtp = &__get_cpu_var(rcu_dynticks); |
213 | rdtp->dynticks++; | 261 | rdtp->dynticks++; |
214 | rdtp->dynticks_nesting++; | 262 | rdtp->dynticks_nesting++; |
215 | WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs); | 263 | WARN_ON_ONCE(!(rdtp->dynticks & 0x1)); |
216 | local_irq_restore(flags); | 264 | local_irq_restore(flags); |
217 | smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ | 265 | smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ |
218 | } | 266 | } |
@@ -231,7 +279,7 @@ void rcu_nmi_enter(void) | |||
231 | if (rdtp->dynticks & 0x1) | 279 | if (rdtp->dynticks & 0x1) |
232 | return; | 280 | return; |
233 | rdtp->dynticks_nmi++; | 281 | rdtp->dynticks_nmi++; |
234 | WARN_ON_RATELIMIT(!(rdtp->dynticks_nmi & 0x1), &rcu_rs); | 282 | WARN_ON_ONCE(!(rdtp->dynticks_nmi & 0x1)); |
235 | smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ | 283 | smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ |
236 | } | 284 | } |
237 | 285 | ||
@@ -250,7 +298,7 @@ void rcu_nmi_exit(void) | |||
250 | return; | 298 | return; |
251 | smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */ | 299 | smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */ |
252 | rdtp->dynticks_nmi++; | 300 | rdtp->dynticks_nmi++; |
253 | WARN_ON_RATELIMIT(rdtp->dynticks_nmi & 0x1, &rcu_rs); | 301 | WARN_ON_ONCE(rdtp->dynticks_nmi & 0x1); |
254 | } | 302 | } |
255 | 303 | ||
256 | /** | 304 | /** |
@@ -266,7 +314,7 @@ void rcu_irq_enter(void) | |||
266 | if (rdtp->dynticks_nesting++) | 314 | if (rdtp->dynticks_nesting++) |
267 | return; | 315 | return; |
268 | rdtp->dynticks++; | 316 | rdtp->dynticks++; |
269 | WARN_ON_RATELIMIT(!(rdtp->dynticks & 0x1), &rcu_rs); | 317 | WARN_ON_ONCE(!(rdtp->dynticks & 0x1)); |
270 | smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ | 318 | smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ |
271 | } | 319 | } |
272 | 320 | ||
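The WARN_ON_ONCE() conversions above all police one invariant: ->dynticks is incremented on every transition into or out of dynticks-idle mode, so the counter is even while a CPU sleeps and odd while it runs, and force_quiescent_state() can credit a sleeping (or since-woken) CPU with an implicit quiescent state just by snapshotting and later re-reading the counter. A stand-alone sketch of that parity trick, with invented names and without the separate NMI counter, might look like this:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model of the ->dynticks parity protocol:
     * even = in dynticks idle, odd = active. */
    struct cpu_dynticks {
        long dynticks;
    };

    static void enter_idle(struct cpu_dynticks *d)
    {
        d->dynticks++;                 /* odd -> even, like rcu_enter_nohz() */
        assert(!(d->dynticks & 0x1));
    }

    static void exit_idle(struct cpu_dynticks *d)
    {
        d->dynticks++;                 /* even -> odd, like rcu_exit_nohz() */
        assert(d->dynticks & 0x1);
    }

    /* Has this CPU been through (or is it still in) idle since the snapshot?
     * Mirrors the "counter changed or is even" test used when forcing
     * quiescent states. */
    static bool implicit_qs(long snap, long curr)
    {
        return curr != snap || !(curr & 0x1);
    }

    int main(void)
    {
        struct cpu_dynticks d = { .dynticks = 1 };  /* starts active (odd) */
        long snap = d.dynticks;   /* dyntick_save_progress_counter() analogue */

        printf("%d\n", implicit_qs(snap, d.dynticks));  /* 0: no idle seen yet */
        enter_idle(&d);
        exit_idle(&d);
        printf("%d\n", implicit_qs(snap, d.dynticks));  /* 1: credit a QS */
        return 0;
    }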
@@ -285,39 +333,20 @@ void rcu_irq_exit(void) | |||
285 | return; | 333 | return; |
286 | smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */ | 334 | smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */ |
287 | rdtp->dynticks++; | 335 | rdtp->dynticks++; |
288 | WARN_ON_RATELIMIT(rdtp->dynticks & 0x1, &rcu_rs); | 336 | WARN_ON_ONCE(rdtp->dynticks & 0x1); |
289 | 337 | ||
290 | /* If the interrupt queued a callback, get out of dyntick mode. */ | 338 | /* If the interrupt queued a callback, get out of dyntick mode. */ |
291 | if (__get_cpu_var(rcu_data).nxtlist || | 339 | if (__get_cpu_var(rcu_sched_data).nxtlist || |
292 | __get_cpu_var(rcu_bh_data).nxtlist) | 340 | __get_cpu_var(rcu_bh_data).nxtlist) |
293 | set_need_resched(); | 341 | set_need_resched(); |
294 | } | 342 | } |
295 | 343 | ||
296 | /* | ||
297 | * Record the specified "completed" value, which is later used to validate | ||
298 | * dynticks counter manipulations. Specify "rsp->completed - 1" to | ||
299 | * unconditionally invalidate any future dynticks manipulations (which is | ||
300 | * useful at the beginning of a grace period). | ||
301 | */ | ||
302 | static void dyntick_record_completed(struct rcu_state *rsp, long comp) | ||
303 | { | ||
304 | rsp->dynticks_completed = comp; | ||
305 | } | ||
306 | |||
307 | #ifdef CONFIG_SMP | 344 | #ifdef CONFIG_SMP |
308 | 345 | ||
309 | /* | 346 | /* |
310 | * Recall the previously recorded value of the completion for dynticks. | ||
311 | */ | ||
312 | static long dyntick_recall_completed(struct rcu_state *rsp) | ||
313 | { | ||
314 | return rsp->dynticks_completed; | ||
315 | } | ||
316 | |||
317 | /* | ||
318 | * Snapshot the specified CPU's dynticks counter so that we can later | 347 | * Snapshot the specified CPU's dynticks counter so that we can later |
319 | * credit them with an implicit quiescent state. Return 1 if this CPU | 348 | * credit them with an implicit quiescent state. Return 1 if this CPU |
320 | * is already in a quiescent state courtesy of dynticks idle mode. | 349 | * is in dynticks idle mode, which is an extended quiescent state. |
321 | */ | 350 | */ |
322 | static int dyntick_save_progress_counter(struct rcu_data *rdp) | 351 | static int dyntick_save_progress_counter(struct rcu_data *rdp) |
323 | { | 352 | { |
@@ -377,24 +406,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) | |||
377 | 406 | ||
378 | #else /* #ifdef CONFIG_NO_HZ */ | 407 | #else /* #ifdef CONFIG_NO_HZ */ |
379 | 408 | ||
380 | static void dyntick_record_completed(struct rcu_state *rsp, long comp) | ||
381 | { | ||
382 | } | ||
383 | |||
384 | #ifdef CONFIG_SMP | 409 | #ifdef CONFIG_SMP |
385 | 410 | ||
386 | /* | ||
387 | * If there are no dynticks, then the only way that a CPU can passively | ||
388 | * be in a quiescent state is to be offline. Unlike dynticks idle, which | ||
389 | * is a point in time during the prior (already finished) grace period, | ||
390 | * an offline CPU is always in a quiescent state, and thus can be | ||
391 | * unconditionally applied. So just return the current value of completed. | ||
392 | */ | ||
393 | static long dyntick_recall_completed(struct rcu_state *rsp) | ||
394 | { | ||
395 | return rsp->completed; | ||
396 | } | ||
397 | |||
398 | static int dyntick_save_progress_counter(struct rcu_data *rdp) | 411 | static int dyntick_save_progress_counter(struct rcu_data *rdp) |
399 | { | 412 | { |
400 | return 0; | 413 | return 0; |
@@ -423,32 +436,39 @@ static void print_other_cpu_stall(struct rcu_state *rsp) | |||
423 | long delta; | 436 | long delta; |
424 | unsigned long flags; | 437 | unsigned long flags; |
425 | struct rcu_node *rnp = rcu_get_root(rsp); | 438 | struct rcu_node *rnp = rcu_get_root(rsp); |
426 | struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1]; | ||
427 | struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES]; | ||
428 | 439 | ||
429 | /* Only let one CPU complain about others per time interval. */ | 440 | /* Only let one CPU complain about others per time interval. */ |
430 | 441 | ||
431 | spin_lock_irqsave(&rnp->lock, flags); | 442 | spin_lock_irqsave(&rnp->lock, flags); |
432 | delta = jiffies - rsp->jiffies_stall; | 443 | delta = jiffies - rsp->jiffies_stall; |
433 | if (delta < RCU_STALL_RAT_DELAY || rsp->gpnum == rsp->completed) { | 444 | if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) { |
434 | spin_unlock_irqrestore(&rnp->lock, flags); | 445 | spin_unlock_irqrestore(&rnp->lock, flags); |
435 | return; | 446 | return; |
436 | } | 447 | } |
437 | rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK; | 448 | rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK; |
449 | |||
450 | /* | ||
451 | * Now rat on any tasks that got kicked up to the root rcu_node | ||
452 | * due to CPU offlining. | ||
453 | */ | ||
454 | rcu_print_task_stall(rnp); | ||
438 | spin_unlock_irqrestore(&rnp->lock, flags); | 455 | spin_unlock_irqrestore(&rnp->lock, flags); |
439 | 456 | ||
440 | /* OK, time to rat on our buddy... */ | 457 | /* OK, time to rat on our buddy... */ |
441 | 458 | ||
442 | printk(KERN_ERR "INFO: RCU detected CPU stalls:"); | 459 | printk(KERN_ERR "INFO: RCU detected CPU stalls:"); |
443 | for (; rnp_cur < rnp_end; rnp_cur++) { | 460 | rcu_for_each_leaf_node(rsp, rnp) { |
444 | if (rnp_cur->qsmask == 0) | 461 | rcu_print_task_stall(rnp); |
462 | if (rnp->qsmask == 0) | ||
445 | continue; | 463 | continue; |
446 | for (cpu = 0; cpu <= rnp_cur->grphi - rnp_cur->grplo; cpu++) | 464 | for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++) |
447 | if (rnp_cur->qsmask & (1UL << cpu)) | 465 | if (rnp->qsmask & (1UL << cpu)) |
448 | printk(" %d", rnp_cur->grplo + cpu); | 466 | printk(" %d", rnp->grplo + cpu); |
449 | } | 467 | } |
450 | printk(" (detected by %d, t=%ld jiffies)\n", | 468 | printk(" (detected by %d, t=%ld jiffies)\n", |
451 | smp_processor_id(), (long)(jiffies - rsp->gp_start)); | 469 | smp_processor_id(), (long)(jiffies - rsp->gp_start)); |
470 | trigger_all_cpu_backtrace(); | ||
471 | |||
452 | force_quiescent_state(rsp, 0); /* Kick them all. */ | 472 | force_quiescent_state(rsp, 0); /* Kick them all. */ |
453 | } | 473 | } |
454 | 474 | ||
@@ -459,12 +479,14 @@ static void print_cpu_stall(struct rcu_state *rsp) | |||
459 | 479 | ||
460 | printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu jiffies)\n", | 480 | printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu jiffies)\n", |
461 | smp_processor_id(), jiffies - rsp->gp_start); | 481 | smp_processor_id(), jiffies - rsp->gp_start); |
462 | dump_stack(); | 482 | trigger_all_cpu_backtrace(); |
483 | |||
463 | spin_lock_irqsave(&rnp->lock, flags); | 484 | spin_lock_irqsave(&rnp->lock, flags); |
464 | if ((long)(jiffies - rsp->jiffies_stall) >= 0) | 485 | if ((long)(jiffies - rsp->jiffies_stall) >= 0) |
465 | rsp->jiffies_stall = | 486 | rsp->jiffies_stall = |
466 | jiffies + RCU_SECONDS_TILL_STALL_RECHECK; | 487 | jiffies + RCU_SECONDS_TILL_STALL_RECHECK; |
467 | spin_unlock_irqrestore(&rnp->lock, flags); | 488 | spin_unlock_irqrestore(&rnp->lock, flags); |
489 | |||
468 | set_need_resched(); /* kick ourselves to get things going. */ | 490 | set_need_resched(); /* kick ourselves to get things going. */ |
469 | } | 491 | } |
470 | 492 | ||
@@ -480,8 +502,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) | |||
480 | /* We haven't checked in, so go dump stack. */ | 502 | /* We haven't checked in, so go dump stack. */ |
481 | print_cpu_stall(rsp); | 503 | print_cpu_stall(rsp); |
482 | 504 | ||
483 | } else if (rsp->gpnum != rsp->completed && | 505 | } else if (rcu_gp_in_progress(rsp) && delta >= RCU_STALL_RAT_DELAY) { |
484 | delta >= RCU_STALL_RAT_DELAY) { | ||
485 | 506 | ||
486 | /* They had two time units to dump stack, so complain. */ | 507 | /* They had two time units to dump stack, so complain. */ |
487 | print_other_cpu_stall(rsp); | 508 | print_other_cpu_stall(rsp); |
@@ -503,15 +524,33 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) | |||
503 | /* | 524 | /* |
504 | * Update CPU-local rcu_data state to record the newly noticed grace period. | 525 | * Update CPU-local rcu_data state to record the newly noticed grace period. |
505 | * This is used both when we started the grace period and when we notice | 526 | * This is used both when we started the grace period and when we notice |
506 | * that someone else started the grace period. | 527 | * that someone else started the grace period. The caller must hold the |
528 | * ->lock of the leaf rcu_node structure corresponding to the current CPU, | ||
529 | * and must have irqs disabled. | ||
507 | */ | 530 | */ |
531 | static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp) | ||
532 | { | ||
533 | if (rdp->gpnum != rnp->gpnum) { | ||
534 | rdp->qs_pending = 1; | ||
535 | rdp->passed_quiesc = 0; | ||
536 | rdp->gpnum = rnp->gpnum; | ||
537 | } | ||
538 | } | ||
539 | |||
508 | static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp) | 540 | static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp) |
509 | { | 541 | { |
510 | rdp->qs_pending = 1; | 542 | unsigned long flags; |
511 | rdp->passed_quiesc = 0; | 543 | struct rcu_node *rnp; |
512 | rdp->gpnum = rsp->gpnum; | 544 | |
513 | rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending + | 545 | local_irq_save(flags); |
514 | RCU_JIFFIES_TILL_FORCE_QS; | 546 | rnp = rdp->mynode; |
547 | if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */ | ||
548 | !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */ | ||
549 | local_irq_restore(flags); | ||
550 | return; | ||
551 | } | ||
552 | __note_new_gpnum(rsp, rnp, rdp); | ||
553 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
515 | } | 554 | } |
516 | 555 | ||
517 | /* | 556 | /* |
@@ -535,6 +574,79 @@ check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp) | |||
535 | } | 574 | } |
536 | 575 | ||
537 | /* | 576 | /* |
577 | * Advance this CPU's callbacks, but only if the current grace period | ||
578 | * has ended. This may be called only from the CPU to whom the rdp | ||
579 | * belongs. In addition, the corresponding leaf rcu_node structure's | ||
580 | * ->lock must be held by the caller, with irqs disabled. | ||
581 | */ | ||
582 | static void | ||
583 | __rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp) | ||
584 | { | ||
585 | /* Did another grace period end? */ | ||
586 | if (rdp->completed != rnp->completed) { | ||
587 | |||
588 | /* Advance callbacks. No harm if list empty. */ | ||
589 | rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL]; | ||
590 | rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL]; | ||
591 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
592 | |||
593 | /* Remember that we saw this grace-period completion. */ | ||
594 | rdp->completed = rnp->completed; | ||
595 | } | ||
596 | } | ||
597 | |||
598 | /* | ||
599 | * Advance this CPU's callbacks, but only if the current grace period | ||
600 | * has ended. This may be called only from the CPU to whom the rdp | ||
601 | * belongs. | ||
602 | */ | ||
603 | static void | ||
604 | rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp) | ||
605 | { | ||
606 | unsigned long flags; | ||
607 | struct rcu_node *rnp; | ||
608 | |||
609 | local_irq_save(flags); | ||
610 | rnp = rdp->mynode; | ||
611 | if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */ | ||
612 | !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */ | ||
613 | local_irq_restore(flags); | ||
614 | return; | ||
615 | } | ||
616 | __rcu_process_gp_end(rsp, rnp, rdp); | ||
617 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
618 | } | ||
619 | |||
620 | /* | ||
621 | * Do per-CPU grace-period initialization for running CPU. The caller | ||
622 | * must hold the lock of the leaf rcu_node structure corresponding to | ||
623 | * this CPU. | ||
624 | */ | ||
625 | static void | ||
626 | rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp) | ||
627 | { | ||
628 | /* Prior grace period ended, so advance callbacks for current CPU. */ | ||
629 | __rcu_process_gp_end(rsp, rnp, rdp); | ||
630 | |||
631 | /* | ||
632 | * Because this CPU just now started the new grace period, we know | ||
633 | * that all of its callbacks will be covered by this upcoming grace | ||
634 | * period, even the ones that were registered arbitrarily recently. | ||
635 | * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL. | ||
636 | * | ||
637 | * Other CPUs cannot be sure exactly when the grace period started. | ||
638 | * Therefore, their recently registered callbacks must pass through | ||
639 | * an additional RCU_NEXT_READY stage, so that they will be handled | ||
640 | * by the next RCU grace period. | ||
641 | */ | ||
642 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
643 | rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
644 | |||
645 | /* Set state so that this CPU will detect the next quiescent state. */ | ||
646 | __note_new_gpnum(rsp, rnp, rdp); | ||
647 | } | ||
648 | |||
649 | /* | ||
538 | * Start a new RCU grace period if warranted, re-initializing the hierarchy | 650 | * Start a new RCU grace period if warranted, re-initializing the hierarchy |
539 | * in preparation for detecting the next grace period. The caller must hold | 651 | * in preparation for detecting the next grace period. The caller must hold |
540 | * the root node's ->lock, which is released before return. Hard irqs must | 652 | * the root node's ->lock, which is released before return. Hard irqs must |
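For orientation, __rcu_process_gp_end() and rcu_start_gp_per_cpu() above never walk the callback list: each CPU keeps one singly linked list carved into four segments (done, waiting, next-ready, next) by the nxttail[] array of tail pointers, and "advancing" callbacks is just copying those pointers. A rough user-space model of the idea, with simplified names and no locking, could read:

    #include <stddef.h>
    #include <stdio.h>

    /* One list per CPU, carved into segments by tail pointers, in the
     * spirit of rcu_data's ->nxtlist/->nxttail[] (names simplified). */
    enum { DONE_TAIL, WAIT_TAIL, NEXT_READY_TAIL, NEXT_TAIL, NSEG };

    struct cb {
        struct cb *next;
        int id;
    };

    struct cblist {
        struct cb *head;
        struct cb **tail[NSEG];  /* tail[i]: ->next slot that ends segment i */
    };

    static void cblist_init(struct cblist *l)
    {
        l->head = NULL;
        for (int i = 0; i < NSEG; i++)
            l->tail[i] = &l->head;
    }

    /* Queue behind everything else, as a new callback would be. */
    static void cblist_enqueue(struct cblist *l, struct cb *c)
    {
        c->next = NULL;
        *l->tail[NEXT_TAIL] = c;
        l->tail[NEXT_TAIL] = &c->next;
    }

    /* A grace period ended: promote each segment by copying tail pointers,
     * the three assignments in __rcu_process_gp_end(). */
    static void cblist_gp_ended(struct cblist *l)
    {
        l->tail[DONE_TAIL] = l->tail[WAIT_TAIL];
        l->tail[WAIT_TAIL] = l->tail[NEXT_READY_TAIL];
        l->tail[NEXT_READY_TAIL] = l->tail[NEXT_TAIL];
    }

    /* This CPU just started the new grace period, so every queued callback
     * is covered by it, as in rcu_start_gp_per_cpu(). */
    static void cblist_gp_started_here(struct cblist *l)
    {
        l->tail[NEXT_READY_TAIL] = l->tail[NEXT_TAIL];
        l->tail[WAIT_TAIL] = l->tail[NEXT_TAIL];
    }

    static int count_ready(struct cblist *l)
    {
        int n = 0;

        for (struct cb **p = &l->head; p != l->tail[DONE_TAIL]; p = &(*p)->next)
            n++;
        return n;
    }

    int main(void)
    {
        struct cblist l;
        struct cb a = { .id = 1 }, b = { .id = 2 };

        cblist_init(&l);
        cblist_enqueue(&l, &a);
        cblist_gp_started_here(&l);  /* a waits for the current grace period */
        cblist_enqueue(&l, &b);      /* b arrived later, needs a later one */
        cblist_gp_ended(&l);
        printf("ready: %d\n", count_ready(&l));  /* 1: only a */
        cblist_gp_started_here(&l);
        cblist_gp_ended(&l);
        printf("ready: %d\n", count_ready(&l));  /* 2: a and b */
        return 0;
    }

Promotion is O(1) and preserves callback order, no matter how long the list is.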
@@ -546,36 +658,43 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) | |||
546 | { | 658 | { |
547 | struct rcu_data *rdp = rsp->rda[smp_processor_id()]; | 659 | struct rcu_data *rdp = rsp->rda[smp_processor_id()]; |
548 | struct rcu_node *rnp = rcu_get_root(rsp); | 660 | struct rcu_node *rnp = rcu_get_root(rsp); |
549 | struct rcu_node *rnp_cur; | ||
550 | struct rcu_node *rnp_end; | ||
551 | 661 | ||
552 | if (!cpu_needs_another_gp(rsp, rdp)) { | 662 | if (!cpu_needs_another_gp(rsp, rdp)) { |
553 | spin_unlock_irqrestore(&rnp->lock, flags); | 663 | if (rnp->completed == rsp->completed) { |
664 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
665 | return; | ||
666 | } | ||
667 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
668 | |||
669 | /* | ||
670 | * Propagate new ->completed value to rcu_node structures | ||
671 | * so that other CPUs don't have to wait until the start | ||
672 | * of the next grace period to process their callbacks. | ||
673 | */ | ||
674 | rcu_for_each_node_breadth_first(rsp, rnp) { | ||
675 | spin_lock(&rnp->lock); /* irqs already disabled. */ | ||
676 | rnp->completed = rsp->completed; | ||
677 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
678 | } | ||
679 | local_irq_restore(flags); | ||
554 | return; | 680 | return; |
555 | } | 681 | } |
556 | 682 | ||
557 | /* Advance to a new grace period and initialize state. */ | 683 | /* Advance to a new grace period and initialize state. */ |
558 | rsp->gpnum++; | 684 | rsp->gpnum++; |
685 | WARN_ON_ONCE(rsp->signaled == RCU_GP_INIT); | ||
559 | rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */ | 686 | rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */ |
560 | rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; | 687 | rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; |
561 | rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending + | ||
562 | RCU_JIFFIES_TILL_FORCE_QS; | ||
563 | record_gp_stall_check_time(rsp); | 688 | record_gp_stall_check_time(rsp); |
564 | dyntick_record_completed(rsp, rsp->completed - 1); | ||
565 | note_new_gpnum(rsp, rdp); | ||
566 | |||
567 | /* | ||
568 | * Because we are first, we know that all our callbacks will | ||
569 | * be covered by this upcoming grace period, even the ones | ||
570 | * that were registered arbitrarily recently. | ||
571 | */ | ||
572 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
573 | rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
574 | 689 | ||
575 | /* Special-case the common single-level case. */ | 690 | /* Special-case the common single-level case. */ |
576 | if (NUM_RCU_NODES == 1) { | 691 | if (NUM_RCU_NODES == 1) { |
692 | rcu_preempt_check_blocked_tasks(rnp); | ||
577 | rnp->qsmask = rnp->qsmaskinit; | 693 | rnp->qsmask = rnp->qsmaskinit; |
694 | rnp->gpnum = rsp->gpnum; | ||
695 | rnp->completed = rsp->completed; | ||
578 | rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */ | 696 | rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */ |
697 | rcu_start_gp_per_cpu(rsp, rnp, rdp); | ||
579 | spin_unlock_irqrestore(&rnp->lock, flags); | 698 | spin_unlock_irqrestore(&rnp->lock, flags); |
580 | return; | 699 | return; |
581 | } | 700 | } |
@@ -587,88 +706,71 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) | |||
587 | spin_lock(&rsp->onofflock); /* irqs already disabled. */ | 706 | spin_lock(&rsp->onofflock); /* irqs already disabled. */ |
588 | 707 | ||
589 | /* | 708 | /* |
590 | * Set the quiescent-state-needed bits in all the non-leaf RCU | 709 | * Set the quiescent-state-needed bits in all the rcu_node |
591 | * nodes for all currently online CPUs. This operation relies | 710 | * structures for all currently online CPUs in breadth-first |
592 | * on the layout of the hierarchy within the rsp->node[] array. | 711 | * order, starting from the root rcu_node structure. This |
593 | * Note that other CPUs will access only the leaves of the | 712 | * operation relies on the layout of the hierarchy within the |
594 | * hierarchy, which still indicate that no grace period is in | 713 | * rsp->node[] array. Note that other CPUs will access only |
595 | * progress. In addition, we have excluded CPU-hotplug operations. | 714 | * the leaves of the hierarchy, which still indicate that no |
596 | * | 715 | * grace period is in progress, at least until the corresponding |
597 | * We therefore do not need to hold any locks. Any required | 716 | * leaf node has been initialized. In addition, we have excluded |
598 | * memory barriers will be supplied by the locks guarding the | 717 | * CPU-hotplug operations. |
599 | * leaf rcu_nodes in the hierarchy. | ||
600 | */ | ||
601 | |||
602 | rnp_end = rsp->level[NUM_RCU_LVLS - 1]; | ||
603 | for (rnp_cur = &rsp->node[0]; rnp_cur < rnp_end; rnp_cur++) | ||
604 | rnp_cur->qsmask = rnp_cur->qsmaskinit; | ||
605 | |||
606 | /* | ||
607 | * Now set up the leaf nodes. Here we must be careful. First, | ||
608 | * we need to hold the lock in order to exclude other CPUs, which | ||
609 | * might be contending for the leaf nodes' locks. Second, as | ||
610 | * soon as we initialize a given leaf node, its CPUs might run | ||
611 | * up the rest of the hierarchy. We must therefore acquire locks | ||
612 | * for each node that we touch during this stage. (But we still | ||
613 | * are excluding CPU-hotplug operations.) | ||
614 | * | 718 | * |
615 | * Note that the grace period cannot complete until we finish | 719 | * Note that the grace period cannot complete until we finish |
616 | * the initialization process, as there will be at least one | 720 | * the initialization process, as there will be at least one |
617 | * qsmask bit set in the root node until that time, namely the | 721 | * qsmask bit set in the root node until that time, namely the |
618 | * one corresponding to this CPU. | 722 | * one corresponding to this CPU, due to the fact that we have |
723 | * irqs disabled. | ||
619 | */ | 724 | */ |
620 | rnp_end = &rsp->node[NUM_RCU_NODES]; | 725 | rcu_for_each_node_breadth_first(rsp, rnp) { |
621 | rnp_cur = rsp->level[NUM_RCU_LVLS - 1]; | 726 | spin_lock(&rnp->lock); /* irqs already disabled. */ |
622 | for (; rnp_cur < rnp_end; rnp_cur++) { | 727 | rcu_preempt_check_blocked_tasks(rnp); |
623 | spin_lock(&rnp_cur->lock); /* irqs already disabled. */ | 728 | rnp->qsmask = rnp->qsmaskinit; |
624 | rnp_cur->qsmask = rnp_cur->qsmaskinit; | 729 | rnp->gpnum = rsp->gpnum; |
625 | spin_unlock(&rnp_cur->lock); /* irqs already disabled. */ | 730 | rnp->completed = rsp->completed; |
731 | if (rnp == rdp->mynode) | ||
732 | rcu_start_gp_per_cpu(rsp, rnp, rdp); | ||
733 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
626 | } | 734 | } |
627 | 735 | ||
736 | rnp = rcu_get_root(rsp); | ||
737 | spin_lock(&rnp->lock); /* irqs already disabled. */ | ||
628 | rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */ | 738 | rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */ |
739 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
629 | spin_unlock_irqrestore(&rsp->onofflock, flags); | 740 | spin_unlock_irqrestore(&rsp->onofflock, flags); |
630 | } | 741 | } |
631 | 742 | ||
632 | /* | 743 | /* |
633 | * Advance this CPU's callbacks, but only if the current grace period | 744 | * Report a full set of quiescent states to the specified rcu_state |
634 | * has ended. This may be called only from the CPU to whom the rdp | 745 | * data structure. This involves cleaning up after the prior grace |
635 | * belongs. | 746 | * period and letting rcu_start_gp() start up the next grace period |
747 | * if one is needed. Note that the caller must hold rnp->lock, as | ||
748 | * required by rcu_start_gp(), which will release it. | ||
636 | */ | 749 | */ |
637 | static void | 750 | static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags) |
638 | rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp) | 751 | __releases(rcu_get_root(rsp)->lock) |
639 | { | 752 | { |
640 | long completed_snap; | 753 | WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); |
641 | unsigned long flags; | 754 | rsp->completed = rsp->gpnum; |
642 | 755 | rsp->signaled = RCU_GP_IDLE; | |
643 | local_irq_save(flags); | 756 | rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */ |
644 | completed_snap = ACCESS_ONCE(rsp->completed); /* outside of lock. */ | ||
645 | |||
646 | /* Did another grace period end? */ | ||
647 | if (rdp->completed != completed_snap) { | ||
648 | |||
649 | /* Advance callbacks. No harm if list empty. */ | ||
650 | rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL]; | ||
651 | rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL]; | ||
652 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | ||
653 | |||
654 | /* Remember that we saw this grace-period completion. */ | ||
655 | rdp->completed = completed_snap; | ||
656 | } | ||
657 | local_irq_restore(flags); | ||
658 | } | 757 | } |
659 | 758 | ||
660 | /* | 759 | /* |
661 | * Similar to cpu_quiet(), for which it is a helper function. Allows | 760 | * Similar to rcu_report_qs_rdp(), for which it is a helper function. |
662 | * a group of CPUs to be quieted at one go, though all the CPUs in the | 761 | * Allows quiescent states for a group of CPUs to be reported at one go |
663 | * group must be represented by the same leaf rcu_node structure. | 762 | * to the specified rcu_node structure, though all the CPUs in the group |
664 | * That structure's lock must be held upon entry, and it is released | 763 | * must be represented by the same rcu_node structure (which need not be |
665 | * before return. | 764 | * a leaf rcu_node structure, though it often will be). That structure's |
765 | * lock must be held upon entry, and it is released before return. | ||
666 | */ | 766 | */ |
667 | static void | 767 | static void |
668 | cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp, | 768 | rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp, |
669 | unsigned long flags) | 769 | struct rcu_node *rnp, unsigned long flags) |
670 | __releases(rnp->lock) | 770 | __releases(rnp->lock) |
671 | { | 771 | { |
772 | struct rcu_node *rnp_c; | ||
773 | |||
672 | /* Walk up the rcu_node hierarchy. */ | 774 | /* Walk up the rcu_node hierarchy. */ |
673 | for (;;) { | 775 | for (;;) { |
674 | if (!(rnp->qsmask & mask)) { | 776 | if (!(rnp->qsmask & mask)) { |
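rcu_report_qs_rnp() above is the upward walk that eventually ends a grace period: each rcu_node records in ->qsmask which children still owe a quiescent state, clearing the last bit at one level clears one bit in the parent, and emptying the root hands off to rcu_report_qs_rsp(). A deliberately tiny two-level model of that walk (no locking, no preempted-reader bookkeeping, invented names) is sketched here:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy two-level model of rcu_node ->qsmask propagation. */
    struct node {
        struct node *parent;
        unsigned long qsmask;   /* children/CPUs still needing a QS this GP */
        unsigned long grpmask;  /* this node's bit in its parent's qsmask */
    };

    /* Returns true when the last outstanding bit at the root was cleared,
     * i.e. the grace period may end (rcu_report_qs_rsp() analogue). */
    static bool report_qs(struct node *rnp, unsigned long mask)
    {
        for (;;) {
            rnp->qsmask &= ~mask;
            if (rnp->qsmask != 0)
                return false;    /* siblings still pending at this level */
            if (rnp->parent == NULL)
                return true;     /* root emptied: every CPU checked in */
            mask = rnp->grpmask; /* clear our bit one level up */
            rnp = rnp->parent;
        }
    }

    int main(void)
    {
        struct node root  = { .parent = NULL,  .qsmask = 0x3 };
        struct node leaf0 = { .parent = &root, .qsmask = 0x3, .grpmask = 0x1 };
        struct node leaf1 = { .parent = &root, .qsmask = 0x1, .grpmask = 0x2 };

        printf("%d\n", report_qs(&leaf0, 0x1)); /* 0: leaf0 still has a holdout */
        printf("%d\n", report_qs(&leaf1, 0x1)); /* 0: root still waits on leaf0 */
        printf("%d\n", report_qs(&leaf0, 0x2)); /* 1: last CPU reported, GP ends */
        return 0;
    }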
@@ -678,7 +780,7 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp, | |||
678 | return; | 780 | return; |
679 | } | 781 | } |
680 | rnp->qsmask &= ~mask; | 782 | rnp->qsmask &= ~mask; |
681 | if (rnp->qsmask != 0) { | 783 | if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) { |
682 | 784 | ||
683 | /* Other bits still set at this level, so done. */ | 785 | /* Other bits still set at this level, so done. */ |
684 | spin_unlock_irqrestore(&rnp->lock, flags); | 786 | spin_unlock_irqrestore(&rnp->lock, flags); |
@@ -692,31 +794,31 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp, | |||
692 | break; | 794 | break; |
693 | } | 795 | } |
694 | spin_unlock_irqrestore(&rnp->lock, flags); | 796 | spin_unlock_irqrestore(&rnp->lock, flags); |
797 | rnp_c = rnp; | ||
695 | rnp = rnp->parent; | 798 | rnp = rnp->parent; |
696 | spin_lock_irqsave(&rnp->lock, flags); | 799 | spin_lock_irqsave(&rnp->lock, flags); |
800 | WARN_ON_ONCE(rnp_c->qsmask); | ||
697 | } | 801 | } |
698 | 802 | ||
699 | /* | 803 | /* |
700 | * Get here if we are the last CPU to pass through a quiescent | 804 | * Get here if we are the last CPU to pass through a quiescent |
701 | * state for this grace period. Clean up and let rcu_start_gp() | 805 | * state for this grace period. Invoke rcu_report_qs_rsp() |
702 | * start up the next grace period if one is needed. Note that | 806 | * to clean up and start the next grace period if one is needed. |
703 | * we still hold rnp->lock, as required by rcu_start_gp(), which | ||
704 | * will release it. | ||
705 | */ | 807 | */ |
706 | rsp->completed = rsp->gpnum; | 808 | rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */ |
707 | rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]); | ||
708 | rcu_start_gp(rsp, flags); /* releases rnp->lock. */ | ||
709 | } | 809 | } |
710 | 810 | ||
711 | /* | 811 | /* |
712 | * Record a quiescent state for the specified CPU, which must either be | 812 | * Record a quiescent state for the specified CPU to that CPU's rcu_data |
713 | * the current CPU or an offline CPU. The lastcomp argument is used to | 813 | * structure. This must be either called from the specified CPU, or |
714 | * make sure we are still in the grace period of interest. We don't want | 814 | * called when the specified CPU is known to be offline (and when it is |
715 | * to end the current grace period based on quiescent states detected in | 815 | * also known that no other CPU is concurrently trying to help the offline |
716 | * an earlier grace period! | 816 | * CPU). The lastcomp argument is used to make sure we are still in the |
817 | * grace period of interest. We don't want to end the current grace period | ||
818 | * based on quiescent states detected in an earlier grace period! | ||
717 | */ | 819 | */ |
718 | static void | 820 | static void |
719 | cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp) | 821 | rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp) |
720 | { | 822 | { |
721 | unsigned long flags; | 823 | unsigned long flags; |
722 | unsigned long mask; | 824 | unsigned long mask; |
@@ -724,15 +826,15 @@ cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp) | |||
724 | 826 | ||
725 | rnp = rdp->mynode; | 827 | rnp = rdp->mynode; |
726 | spin_lock_irqsave(&rnp->lock, flags); | 828 | spin_lock_irqsave(&rnp->lock, flags); |
727 | if (lastcomp != ACCESS_ONCE(rsp->completed)) { | 829 | if (lastcomp != rnp->completed) { |
728 | 830 | ||
729 | /* | 831 | /* |
730 | * Someone beat us to it for this grace period, so leave. | 832 | * Someone beat us to it for this grace period, so leave. |
731 | * The race with GP start is resolved by the fact that we | 833 | * The race with GP start is resolved by the fact that we |
732 | * hold the leaf rcu_node lock, so that the per-CPU bits | 834 | * hold the leaf rcu_node lock, so that the per-CPU bits |
733 | * cannot yet be initialized -- so we would simply find our | 835 | * cannot yet be initialized -- so we would simply find our |
734 | * CPU's bit already cleared in cpu_quiet_msk() if this race | 836 | * CPU's bit already cleared in rcu_report_qs_rnp() if this |
735 | * occurred. | 837 | * race occurred. |
736 | */ | 838 | */ |
737 | rdp->passed_quiesc = 0; /* try again later! */ | 839 | rdp->passed_quiesc = 0; /* try again later! */ |
738 | spin_unlock_irqrestore(&rnp->lock, flags); | 840 | spin_unlock_irqrestore(&rnp->lock, flags); |
@@ -748,10 +850,9 @@ cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp) | |||
748 | * This GP can't end until cpu checks in, so all of our | 850 | * This GP can't end until cpu checks in, so all of our |
749 | * callbacks can be processed during the next GP. | 851 | * callbacks can be processed during the next GP. |
750 | */ | 852 | */ |
751 | rdp = rsp->rda[smp_processor_id()]; | ||
752 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | 853 | rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; |
753 | 854 | ||
754 | cpu_quiet_msk(mask, rsp, rnp, flags); /* releases rnp->lock */ | 855 | rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */ |
755 | } | 856 | } |
756 | } | 857 | } |
757 | 858 | ||
@@ -782,74 +883,113 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp) | |||
782 | if (!rdp->passed_quiesc) | 883 | if (!rdp->passed_quiesc) |
783 | return; | 884 | return; |
784 | 885 | ||
785 | /* Tell RCU we are done (but cpu_quiet() will be the judge of that). */ | 886 | /* |
786 | cpu_quiet(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed); | 887 | * Tell RCU we are done (but rcu_report_qs_rdp() will be the |
888 | * judge of that). | ||
889 | */ | ||
890 | rcu_report_qs_rdp(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed); | ||
787 | } | 891 | } |
788 | 892 | ||
789 | #ifdef CONFIG_HOTPLUG_CPU | 893 | #ifdef CONFIG_HOTPLUG_CPU |
790 | 894 | ||
791 | /* | 895 | /* |
896 | * Move a dying CPU's RCU callbacks to the ->orphan_cbs_list for the | ||
897 | * specified flavor of RCU. The callbacks will be adopted by the next | ||
898 | * _rcu_barrier() invocation or by the CPU_DEAD notifier, whichever | ||
899 | * comes first. Because this is invoked from the CPU_DYING notifier, | ||
900 | * irqs are already disabled. | ||
901 | */ | ||
902 | static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp) | ||
903 | { | ||
904 | int i; | ||
905 | struct rcu_data *rdp = rsp->rda[smp_processor_id()]; | ||
906 | |||
907 | if (rdp->nxtlist == NULL) | ||
908 | return; /* irqs disabled, so comparison is stable. */ | ||
909 | spin_lock(&rsp->onofflock); /* irqs already disabled. */ | ||
910 | *rsp->orphan_cbs_tail = rdp->nxtlist; | ||
911 | rsp->orphan_cbs_tail = rdp->nxttail[RCU_NEXT_TAIL]; | ||
912 | rdp->nxtlist = NULL; | ||
913 | for (i = 0; i < RCU_NEXT_SIZE; i++) | ||
914 | rdp->nxttail[i] = &rdp->nxtlist; | ||
915 | rsp->orphan_qlen += rdp->qlen; | ||
916 | rdp->qlen = 0; | ||
917 | spin_unlock(&rsp->onofflock); /* irqs remain disabled. */ | ||
918 | } | ||
919 | |||
920 | /* | ||
921 | * Adopt previously orphaned RCU callbacks. | ||
922 | */ | ||
923 | static void rcu_adopt_orphan_cbs(struct rcu_state *rsp) | ||
924 | { | ||
925 | unsigned long flags; | ||
926 | struct rcu_data *rdp; | ||
927 | |||
928 | spin_lock_irqsave(&rsp->onofflock, flags); | ||
929 | rdp = rsp->rda[smp_processor_id()]; | ||
930 | if (rsp->orphan_cbs_list == NULL) { | ||
931 | spin_unlock_irqrestore(&rsp->onofflock, flags); | ||
932 | return; | ||
933 | } | ||
934 | *rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_list; | ||
935 | rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_tail; | ||
936 | rdp->qlen += rsp->orphan_qlen; | ||
937 | rsp->orphan_cbs_list = NULL; | ||
938 | rsp->orphan_cbs_tail = &rsp->orphan_cbs_list; | ||
939 | rsp->orphan_qlen = 0; | ||
940 | spin_unlock_irqrestore(&rsp->onofflock, flags); | ||
941 | } | ||
942 | |||
943 | /* | ||
792 | * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy | 944 | * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy |
793 | * and move all callbacks from the outgoing CPU to the current one. | 945 | * and move all callbacks from the outgoing CPU to the current one. |
794 | */ | 946 | */ |
795 | static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) | 947 | static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) |
796 | { | 948 | { |
797 | int i; | ||
798 | unsigned long flags; | 949 | unsigned long flags; |
799 | long lastcomp; | ||
800 | unsigned long mask; | 950 | unsigned long mask; |
951 | int need_report = 0; | ||
801 | struct rcu_data *rdp = rsp->rda[cpu]; | 952 | struct rcu_data *rdp = rsp->rda[cpu]; |
802 | struct rcu_data *rdp_me; | ||
803 | struct rcu_node *rnp; | 953 | struct rcu_node *rnp; |
804 | 954 | ||
805 | /* Exclude any attempts to start a new grace period. */ | 955 | /* Exclude any attempts to start a new grace period. */ |
806 | spin_lock_irqsave(&rsp->onofflock, flags); | 956 | spin_lock_irqsave(&rsp->onofflock, flags); |
807 | 957 | ||
808 | /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */ | 958 | /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */ |
809 | rnp = rdp->mynode; | 959 | rnp = rdp->mynode; /* this is the outgoing CPU's rnp. */ |
810 | mask = rdp->grpmask; /* rnp->grplo is constant. */ | 960 | mask = rdp->grpmask; /* rnp->grplo is constant. */ |
811 | do { | 961 | do { |
812 | spin_lock(&rnp->lock); /* irqs already disabled. */ | 962 | spin_lock(&rnp->lock); /* irqs already disabled. */ |
813 | rnp->qsmaskinit &= ~mask; | 963 | rnp->qsmaskinit &= ~mask; |
814 | if (rnp->qsmaskinit != 0) { | 964 | if (rnp->qsmaskinit != 0) { |
815 | spin_unlock(&rnp->lock); /* irqs already disabled. */ | 965 | if (rnp != rdp->mynode) |
966 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
816 | break; | 967 | break; |
817 | } | 968 | } |
969 | if (rnp == rdp->mynode) | ||
970 | need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp); | ||
971 | else | ||
972 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
818 | mask = rnp->grpmask; | 973 | mask = rnp->grpmask; |
819 | spin_unlock(&rnp->lock); /* irqs already disabled. */ | ||
820 | rnp = rnp->parent; | 974 | rnp = rnp->parent; |
821 | } while (rnp != NULL); | 975 | } while (rnp != NULL); |
822 | lastcomp = rsp->completed; | ||
823 | |||
824 | spin_unlock(&rsp->onofflock); /* irqs remain disabled. */ | ||
825 | |||
826 | /* Being offline is a quiescent state, so go record it. */ | ||
827 | cpu_quiet(cpu, rsp, rdp, lastcomp); | ||
828 | 976 | ||
829 | /* | 977 | /* |
830 | * Move callbacks from the outgoing CPU to the running CPU. | 978 | * We still hold the leaf rcu_node structure lock here, and |
831 | * Note that the outgoing CPU is now quiscent, so it is now | 979 | * irqs are still disabled. The reason for this subterfuge is |
832 | * (uncharacteristically) safe to access it rcu_data structure. | 980 | * because invoking rcu_report_unblock_qs_rnp() with ->onofflock |
833 | * Note also that we must carefully retain the order of the | 981 | * held leads to deadlock. |
834 | * outgoing CPU's callbacks in order for rcu_barrier() to work | ||
835 | * correctly. Finally, note that we start all the callbacks | ||
836 | * afresh, even those that have passed through a grace period | ||
837 | * and are therefore ready to invoke. The theory is that hotplug | ||
838 | * events are rare, and that if they are frequent enough to | ||
839 | * indefinitely delay callbacks, you have far worse things to | ||
840 | * be worrying about. | ||
841 | */ | 982 | */ |
842 | rdp_me = rsp->rda[smp_processor_id()]; | 983 | spin_unlock(&rsp->onofflock); /* irqs remain disabled. */ |
843 | if (rdp->nxtlist != NULL) { | 984 | rnp = rdp->mynode; |
844 | *rdp_me->nxttail[RCU_NEXT_TAIL] = rdp->nxtlist; | 985 | if (need_report & RCU_OFL_TASKS_NORM_GP) |
845 | rdp_me->nxttail[RCU_NEXT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; | 986 | rcu_report_unblock_qs_rnp(rnp, flags); |
846 | rdp->nxtlist = NULL; | 987 | else |
847 | for (i = 0; i < RCU_NEXT_SIZE; i++) | 988 | spin_unlock_irqrestore(&rnp->lock, flags); |
848 | rdp->nxttail[i] = &rdp->nxtlist; | 989 | if (need_report & RCU_OFL_TASKS_EXP_GP) |
849 | rdp_me->qlen += rdp->qlen; | 990 | rcu_report_exp_rnp(rsp, rnp); |
850 | rdp->qlen = 0; | 991 | |
851 | } | 992 | rcu_adopt_orphan_cbs(rsp); |
852 | local_irq_restore(flags); | ||
853 | } | 993 | } |
854 | 994 | ||
855 | /* | 995 | /* |
@@ -860,12 +1000,21 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) | |||
860 | */ | 1000 | */ |
861 | static void rcu_offline_cpu(int cpu) | 1001 | static void rcu_offline_cpu(int cpu) |
862 | { | 1002 | { |
863 | __rcu_offline_cpu(cpu, &rcu_state); | 1003 | __rcu_offline_cpu(cpu, &rcu_sched_state); |
864 | __rcu_offline_cpu(cpu, &rcu_bh_state); | 1004 | __rcu_offline_cpu(cpu, &rcu_bh_state); |
1005 | rcu_preempt_offline_cpu(cpu); | ||
865 | } | 1006 | } |
866 | 1007 | ||
867 | #else /* #ifdef CONFIG_HOTPLUG_CPU */ | 1008 | #else /* #ifdef CONFIG_HOTPLUG_CPU */ |
868 | 1009 | ||
1010 | static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp) | ||
1011 | { | ||
1012 | } | ||
1013 | |||
1014 | static void rcu_adopt_orphan_cbs(struct rcu_state *rsp) | ||
1015 | { | ||
1016 | } | ||
1017 | |||
869 | static void rcu_offline_cpu(int cpu) | 1018 | static void rcu_offline_cpu(int cpu) |
870 | { | 1019 | { |
871 | } | 1020 | } |
@@ -876,7 +1025,7 @@ static void rcu_offline_cpu(int cpu) | |||
876 | * Invoke any RCU callbacks that have made it to the end of their grace | 1025 | * Invoke any RCU callbacks that have made it to the end of their grace |
877 | * period. Throttle as specified by rdp->blimit. | 1026 | * period. Throttle as specified by rdp->blimit. |
878 | */ | 1027 | */ |
879 | static void rcu_do_batch(struct rcu_data *rdp) | 1028 | static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) |
880 | { | 1029 | { |
881 | unsigned long flags; | 1030 | unsigned long flags; |
882 | struct rcu_head *next, *list, **tail; | 1031 | struct rcu_head *next, *list, **tail; |
@@ -929,6 +1078,13 @@ static void rcu_do_batch(struct rcu_data *rdp) | |||
929 | if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark) | 1078 | if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark) |
930 | rdp->blimit = blimit; | 1079 | rdp->blimit = blimit; |
931 | 1080 | ||
1081 | /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */ | ||
1082 | if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) { | ||
1083 | rdp->qlen_last_fqs_check = 0; | ||
1084 | rdp->n_force_qs_snap = rsp->n_force_qs; | ||
1085 | } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark) | ||
1086 | rdp->qlen_last_fqs_check = rdp->qlen; | ||
1087 | |||
932 | local_irq_restore(flags); | 1088 | local_irq_restore(flags); |
933 | 1089 | ||
934 | /* Re-raise the RCU softirq if there are callbacks remaining. */ | 1090 | /* Re-raise the RCU softirq if there are callbacks remaining. */ |
@@ -947,6 +1103,8 @@ static void rcu_do_batch(struct rcu_data *rdp) | |||
947 | */ | 1103 | */ |
948 | void rcu_check_callbacks(int cpu, int user) | 1104 | void rcu_check_callbacks(int cpu, int user) |
949 | { | 1105 | { |
1106 | if (!rcu_pending(cpu)) | ||
1107 | return; /* if nothing for RCU to do. */ | ||
950 | if (user || | 1108 | if (user || |
951 | (idle_cpu(cpu) && rcu_scheduler_active && | 1109 | (idle_cpu(cpu) && rcu_scheduler_active && |
952 | !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) { | 1110 | !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) { |
@@ -955,17 +1113,16 @@ void rcu_check_callbacks(int cpu, int user) | |||
955 | * Get here if this CPU took its interrupt from user | 1113 | * Get here if this CPU took its interrupt from user |
956 | * mode or from the idle loop, and if this is not a | 1114 | * mode or from the idle loop, and if this is not a |
957 | * nested interrupt. In this case, the CPU is in | 1115 | * nested interrupt. In this case, the CPU is in |
958 | * a quiescent state, so count it. | 1116 | * a quiescent state, so note it. |
959 | * | 1117 | * |
960 | * No memory barrier is required here because both | 1118 | * No memory barrier is required here because both |
961 | * rcu_qsctr_inc() and rcu_bh_qsctr_inc() reference | 1119 | * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local |
962 | * only CPU-local variables that other CPUs neither | 1120 | * variables that other CPUs neither access nor modify, |
963 | * access nor modify, at least not while the corresponding | 1121 | * at least not while the corresponding CPU is online. |
964 | * CPU is online. | ||
965 | */ | 1122 | */ |
966 | 1123 | ||
967 | rcu_qsctr_inc(cpu); | 1124 | rcu_sched_qs(cpu); |
968 | rcu_bh_qsctr_inc(cpu); | 1125 | rcu_bh_qs(cpu); |
969 | 1126 | ||
970 | } else if (!in_softirq()) { | 1127 | } else if (!in_softirq()) { |
971 | 1128 | ||
@@ -973,11 +1130,12 @@ void rcu_check_callbacks(int cpu, int user) | |||
973 | * Get here if this CPU did not take its interrupt from | 1130 | * Get here if this CPU did not take its interrupt from |
974 | * softirq, in other words, if it is not interrupting | 1131 | * softirq, in other words, if it is not interrupting |
975 | * a rcu_bh read-side critical section. This is an _bh | 1132 | * a rcu_bh read-side critical section. This is an _bh |
976 | * critical section, so count it. | 1133 | * critical section, so note it. |
977 | */ | 1134 | */ |
978 | 1135 | ||
979 | rcu_bh_qsctr_inc(cpu); | 1136 | rcu_bh_qs(cpu); |
980 | } | 1137 | } |
1138 | rcu_preempt_check_callbacks(cpu); | ||
981 | raise_softirq(RCU_SOFTIRQ); | 1139 | raise_softirq(RCU_SOFTIRQ); |
982 | } | 1140 | } |
983 | 1141 | ||
@@ -996,33 +1154,32 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp, | |||
996 | int cpu; | 1154 | int cpu; |
997 | unsigned long flags; | 1155 | unsigned long flags; |
998 | unsigned long mask; | 1156 | unsigned long mask; |
999 | struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1]; | 1157 | struct rcu_node *rnp; |
1000 | struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES]; | ||
1001 | 1158 | ||
1002 | for (; rnp_cur < rnp_end; rnp_cur++) { | 1159 | rcu_for_each_leaf_node(rsp, rnp) { |
1003 | mask = 0; | 1160 | mask = 0; |
1004 | spin_lock_irqsave(&rnp_cur->lock, flags); | 1161 | spin_lock_irqsave(&rnp->lock, flags); |
1005 | if (rsp->completed != lastcomp) { | 1162 | if (rnp->completed != lastcomp) { |
1006 | spin_unlock_irqrestore(&rnp_cur->lock, flags); | 1163 | spin_unlock_irqrestore(&rnp->lock, flags); |
1007 | return 1; | 1164 | return 1; |
1008 | } | 1165 | } |
1009 | if (rnp_cur->qsmask == 0) { | 1166 | if (rnp->qsmask == 0) { |
1010 | spin_unlock_irqrestore(&rnp_cur->lock, flags); | 1167 | spin_unlock_irqrestore(&rnp->lock, flags); |
1011 | continue; | 1168 | continue; |
1012 | } | 1169 | } |
1013 | cpu = rnp_cur->grplo; | 1170 | cpu = rnp->grplo; |
1014 | bit = 1; | 1171 | bit = 1; |
1015 | for (; cpu <= rnp_cur->grphi; cpu++, bit <<= 1) { | 1172 | for (; cpu <= rnp->grphi; cpu++, bit <<= 1) { |
1016 | if ((rnp_cur->qsmask & bit) != 0 && f(rsp->rda[cpu])) | 1173 | if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu])) |
1017 | mask |= bit; | 1174 | mask |= bit; |
1018 | } | 1175 | } |
1019 | if (mask != 0 && rsp->completed == lastcomp) { | 1176 | if (mask != 0 && rnp->completed == lastcomp) { |
1020 | 1177 | ||
1021 | /* cpu_quiet_msk() releases rnp_cur->lock. */ | 1178 | /* rcu_report_qs_rnp() releases rnp->lock. */ |
1022 | cpu_quiet_msk(mask, rsp, rnp_cur, flags); | 1179 | rcu_report_qs_rnp(mask, rsp, rnp, flags); |
1023 | continue; | 1180 | continue; |
1024 | } | 1181 | } |
1025 | spin_unlock_irqrestore(&rnp_cur->lock, flags); | 1182 | spin_unlock_irqrestore(&rnp->lock, flags); |
1026 | } | 1183 | } |
1027 | return 0; | 1184 | return 0; |
1028 | } | 1185 | } |
@@ -1035,37 +1192,35 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) | |||
1035 | { | 1192 | { |
1036 | unsigned long flags; | 1193 | unsigned long flags; |
1037 | long lastcomp; | 1194 | long lastcomp; |
1038 | struct rcu_data *rdp = rsp->rda[smp_processor_id()]; | ||
1039 | struct rcu_node *rnp = rcu_get_root(rsp); | 1195 | struct rcu_node *rnp = rcu_get_root(rsp); |
1040 | u8 signaled; | 1196 | u8 signaled; |
1197 | u8 forcenow; | ||
1041 | 1198 | ||
1042 | if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum)) | 1199 | if (!rcu_gp_in_progress(rsp)) |
1043 | return; /* No grace period in progress, nothing to force. */ | 1200 | return; /* No grace period in progress, nothing to force. */ |
1044 | if (!spin_trylock_irqsave(&rsp->fqslock, flags)) { | 1201 | if (!spin_trylock_irqsave(&rsp->fqslock, flags)) { |
1045 | rsp->n_force_qs_lh++; /* Inexact, can lose counts. Tough! */ | 1202 | rsp->n_force_qs_lh++; /* Inexact, can lose counts. Tough! */ |
1046 | return; /* Someone else is already on the job. */ | 1203 | return; /* Someone else is already on the job. */ |
1047 | } | 1204 | } |
1048 | if (relaxed && | 1205 | if (relaxed && |
1049 | (long)(rsp->jiffies_force_qs - jiffies) >= 0 && | 1206 | (long)(rsp->jiffies_force_qs - jiffies) >= 0) |
1050 | (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) >= 0) | ||
1051 | goto unlock_ret; /* no emergency and done recently. */ | 1207 | goto unlock_ret; /* no emergency and done recently. */ |
1052 | rsp->n_force_qs++; | 1208 | rsp->n_force_qs++; |
1053 | spin_lock(&rnp->lock); | 1209 | spin_lock(&rnp->lock); |
1054 | lastcomp = rsp->completed; | 1210 | lastcomp = rsp->gpnum - 1; |
1055 | signaled = rsp->signaled; | 1211 | signaled = rsp->signaled; |
1056 | rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; | 1212 | rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; |
1057 | rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending + | 1213 | if (!rcu_gp_in_progress(rsp)) { |
1058 | RCU_JIFFIES_TILL_FORCE_QS; | ||
1059 | if (lastcomp == rsp->gpnum) { | ||
1060 | rsp->n_force_qs_ngp++; | 1214 | rsp->n_force_qs_ngp++; |
1061 | spin_unlock(&rnp->lock); | 1215 | spin_unlock(&rnp->lock); |
1062 | goto unlock_ret; /* no GP in progress, time updated. */ | 1216 | goto unlock_ret; /* no GP in progress, time updated. */ |
1063 | } | 1217 | } |
1064 | spin_unlock(&rnp->lock); | 1218 | spin_unlock(&rnp->lock); |
1065 | switch (signaled) { | 1219 | switch (signaled) { |
1220 | case RCU_GP_IDLE: | ||
1066 | case RCU_GP_INIT: | 1221 | case RCU_GP_INIT: |
1067 | 1222 | ||
1068 | break; /* grace period still initializing, ignore. */ | 1223 | break; /* grace period idle or initializing, ignore. */ |
1069 | 1224 | ||
1070 | case RCU_SAVE_DYNTICK: | 1225 | case RCU_SAVE_DYNTICK: |
1071 | 1226 | ||
@@ -1076,20 +1231,29 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) | |||
1076 | if (rcu_process_dyntick(rsp, lastcomp, | 1231 | if (rcu_process_dyntick(rsp, lastcomp, |
1077 | dyntick_save_progress_counter)) | 1232 | dyntick_save_progress_counter)) |
1078 | goto unlock_ret; | 1233 | goto unlock_ret; |
1234 | /* fall into next case. */ | ||
1235 | |||
1236 | case RCU_SAVE_COMPLETED: | ||
1079 | 1237 | ||
1080 | /* Update state, record completion counter. */ | 1238 | /* Update state, record completion counter. */ |
1239 | forcenow = 0; | ||
1081 | spin_lock(&rnp->lock); | 1240 | spin_lock(&rnp->lock); |
1082 | if (lastcomp == rsp->completed) { | 1241 | if (lastcomp + 1 == rsp->gpnum && |
1242 | lastcomp == rsp->completed && | ||
1243 | rsp->signaled == signaled) { | ||
1083 | rsp->signaled = RCU_FORCE_QS; | 1244 | rsp->signaled = RCU_FORCE_QS; |
1084 | dyntick_record_completed(rsp, lastcomp); | 1245 | rsp->completed_fqs = lastcomp; |
1246 | forcenow = signaled == RCU_SAVE_COMPLETED; | ||
1085 | } | 1247 | } |
1086 | spin_unlock(&rnp->lock); | 1248 | spin_unlock(&rnp->lock); |
1087 | break; | 1249 | if (!forcenow) |
1250 | break; | ||
1251 | /* fall into next case. */ | ||
1088 | 1252 | ||
1089 | case RCU_FORCE_QS: | 1253 | case RCU_FORCE_QS: |
1090 | 1254 | ||
1091 | /* Check dyntick-idle state, send IPI to laggarts. */ | 1255 | /* Check dyntick-idle state, send IPI to laggarts. */ |
1092 | if (rcu_process_dyntick(rsp, dyntick_recall_completed(rsp), | 1256 | if (rcu_process_dyntick(rsp, rsp->completed_fqs, |
1093 | rcu_implicit_dynticks_qs)) | 1257 | rcu_implicit_dynticks_qs)) |
1094 | goto unlock_ret; | 1258 | goto unlock_ret; |
1095 | 1259 | ||
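
The switch above implements a small state machine: force_quiescent_state() first snapshots the dyntick counters (RCU_SAVE_DYNTICK), records the completion number (RCU_SAVE_COMPLETED), and only then rechecks the counters and pokes the stragglers (RCU_FORCE_QS), falling through a case when it is safe to force on the same pass. A compressed userspace sketch of that two-phase shape, with invented names and none of the locking or the RCU_SAVE_COMPLETED subtleties, might look like:

    /* Compressed sketch of the two-phase force-quiescent-state machine:
     * pass 1 snapshots per-CPU counters, later passes (or an immediate
     * fall-through) recheck them and poke the CPUs that have not responded.
     * All names are invented; locking and several states are omitted. */
    #include <stdio.h>

    enum fqs_state { GP_IDLE, SAVE_DYNTICK, FORCE_QS };

    static enum fqs_state state = SAVE_DYNTICK;  /* pretend a GP is running */

    static int save_counters(void)
    {
        puts("snapshot dyntick counters");
        return 0;                   /* 0: not everyone is idle, keep going */
    }

    static void recheck_counters(void)
    {
        puts("recheck counters, send resched IPIs to the stragglers");
    }

    static void force_qs_pass(void)
    {
        switch (state) {
        case GP_IDLE:
            break;                  /* no grace period, nothing to force */
        case SAVE_DYNTICK:
            if (save_counters())
                return;             /* every CPU was already idle */
            state = FORCE_QS;
            /* fall through: safe to force on this same pass */
        case FORCE_QS:
            recheck_counters();
            break;
        }
    }

    int main(void)
    {
        force_qs_pass();            /* first pass: snapshot, then force */
        force_qs_pass();            /* later pass: force only */
        return 0;
    }
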
@@ -1120,12 +1284,13 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) | |||
1120 | { | 1284 | { |
1121 | unsigned long flags; | 1285 | unsigned long flags; |
1122 | 1286 | ||
1287 | WARN_ON_ONCE(rdp->beenonline == 0); | ||
1288 | |||
1123 | /* | 1289 | /* |
1124 | * If an RCU GP has gone long enough, go check for dyntick | 1290 | * If an RCU GP has gone long enough, go check for dyntick |
1125 | * idle CPUs and, if needed, send resched IPIs. | 1291 | * idle CPUs and, if needed, send resched IPIs. |
1126 | */ | 1292 | */ |
1127 | if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 || | 1293 | if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0) |
1128 | (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0) | ||
1129 | force_quiescent_state(rsp, 1); | 1294 | force_quiescent_state(rsp, 1); |
1130 | 1295 | ||
1131 | /* | 1296 | /* |
@@ -1144,7 +1309,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) | |||
1144 | } | 1309 | } |
1145 | 1310 | ||
1146 | /* If there are callbacks ready, invoke them. */ | 1311 | /* If there are callbacks ready, invoke them. */ |
1147 | rcu_do_batch(rdp); | 1312 | rcu_do_batch(rsp, rdp); |
1148 | } | 1313 | } |
1149 | 1314 | ||
1150 | /* | 1315 | /* |
@@ -1159,8 +1324,10 @@ static void rcu_process_callbacks(struct softirq_action *unused) | |||
1159 | */ | 1324 | */ |
1160 | smp_mb(); /* See above block comment. */ | 1325 | smp_mb(); /* See above block comment. */ |
1161 | 1326 | ||
1162 | __rcu_process_callbacks(&rcu_state, &__get_cpu_var(rcu_data)); | 1327 | __rcu_process_callbacks(&rcu_sched_state, |
1328 | &__get_cpu_var(rcu_sched_data)); | ||
1163 | __rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data)); | 1329 | __rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data)); |
1330 | rcu_preempt_process_callbacks(); | ||
1164 | 1331 | ||
1165 | /* | 1332 | /* |
1166 | * Memory references from any later RCU read-side critical sections | 1333 | * Memory references from any later RCU read-side critical sections |
@@ -1198,7 +1365,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), | |||
1198 | rdp->nxttail[RCU_NEXT_TAIL] = &head->next; | 1365 | rdp->nxttail[RCU_NEXT_TAIL] = &head->next; |
1199 | 1366 | ||
1200 | /* Start a new grace period if one not already started. */ | 1367 | /* Start a new grace period if one not already started. */ |
1201 | if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum)) { | 1368 | if (!rcu_gp_in_progress(rsp)) { |
1202 | unsigned long nestflag; | 1369 | unsigned long nestflag; |
1203 | struct rcu_node *rnp_root = rcu_get_root(rsp); | 1370 | struct rcu_node *rnp_root = rcu_get_root(rsp); |
1204 | 1371 | ||
@@ -1206,24 +1373,33 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), | |||
1206 | rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock. */ | 1373 | rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock. */ |
1207 | } | 1374 | } |
1208 | 1375 | ||
1209 | /* Force the grace period if too many callbacks or too long waiting. */ | 1376 | /* |
1210 | if (unlikely(++rdp->qlen > qhimark)) { | 1377 | * Force the grace period if too many callbacks or too long waiting. |
1378 | * Enforce hysteresis, and don't invoke force_quiescent_state() | ||
1379 | * if some other CPU has recently done so. Also, don't bother | ||
1380 | * invoking force_quiescent_state() if the newly enqueued callback | ||
1381 | * is the only one waiting for a grace period to complete. | ||
1382 | */ | ||
1383 | if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) { | ||
1211 | rdp->blimit = LONG_MAX; | 1384 | rdp->blimit = LONG_MAX; |
1212 | force_quiescent_state(rsp, 0); | 1385 | if (rsp->n_force_qs == rdp->n_force_qs_snap && |
1213 | } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 || | 1386 | *rdp->nxttail[RCU_DONE_TAIL] != head) |
1214 | (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0) | 1387 | force_quiescent_state(rsp, 0); |
1388 | rdp->n_force_qs_snap = rsp->n_force_qs; | ||
1389 | rdp->qlen_last_fqs_check = rdp->qlen; | ||
1390 | } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0) | ||
1215 | force_quiescent_state(rsp, 1); | 1391 | force_quiescent_state(rsp, 1); |
1216 | local_irq_restore(flags); | 1392 | local_irq_restore(flags); |
1217 | } | 1393 | } |
1218 | 1394 | ||
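
A minimal model of the enqueue-side hysteresis described in the comment above: only consider forcing a grace period once the local callback count has grown by qhimark since the last check, skip the force if some other CPU has forced one in the meantime, and then re-arm. The names below are invented for the sketch, and the real qhimark is a module parameter rather than a constant.

    /* Userspace model of the __call_rcu() hysteresis; these globals stand in
     * for per-CPU rcu_data and rcu_state fields in the kernel. */
    #include <stdio.h>

    #define QHIMARK 10000

    static long qlen, qlen_last_fqs_check;
    static long n_force_qs, n_force_qs_snap;

    static void force_quiescent_state(void)
    {
        n_force_qs++;
        puts("forcing quiescent states");
    }

    static void enqueue_one_callback(void)
    {
        if (++qlen > qlen_last_fqs_check + QHIMARK) {
            if (n_force_qs == n_force_qs_snap)  /* nobody else forced one */
                force_quiescent_state();
            n_force_qs_snap = n_force_qs;
            qlen_last_fqs_check = qlen;         /* re-arm the hysteresis */
        }
    }

    int main(void)
    {
        for (long i = 0; i < 25000; i++)
            enqueue_one_callback();
        printf("forced %ld times for %ld queued callbacks\n", n_force_qs, qlen);
        return 0;
    }
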
1219 | /* | 1395 | /* |
1220 | * Queue an RCU callback for invocation after a grace period. | 1396 | * Queue an RCU-sched callback for invocation after a grace period. |
1221 | */ | 1397 | */ |
1222 | void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | 1398 | void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) |
1223 | { | 1399 | { |
1224 | __call_rcu(head, func, &rcu_state); | 1400 | __call_rcu(head, func, &rcu_sched_state); |
1225 | } | 1401 | } |
1226 | EXPORT_SYMBOL_GPL(call_rcu); | 1402 | EXPORT_SYMBOL_GPL(call_rcu_sched); |
1227 | 1403 | ||
1228 | /* | 1404 | /* |
1229 | * Queue an RCU for invocation after a quicker grace period. | 1405 | * Queue an RCU for invocation after a quicker grace period. |
@@ -1234,6 +1410,68 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | |||
1234 | } | 1410 | } |
1235 | EXPORT_SYMBOL_GPL(call_rcu_bh); | 1411 | EXPORT_SYMBOL_GPL(call_rcu_bh); |
1236 | 1412 | ||
1413 | /** | ||
1414 | * synchronize_sched - wait until an rcu-sched grace period has elapsed. | ||
1415 | * | ||
1416 | * Control will return to the caller some time after a full rcu-sched | ||
1417 | * grace period has elapsed, in other words after all currently executing | ||
1418 | * rcu-sched read-side critical sections have completed. These read-side | ||
1419 | * critical sections are delimited by rcu_read_lock_sched() and | ||
1420 | * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(), | ||
1421 | * local_irq_disable(), and so on may be used in place of | ||
1422 | * rcu_read_lock_sched(). | ||
1423 | * | ||
1424 | * This means that all preempt_disable code sequences, including NMI and | ||
1425 | * hardware-interrupt handlers, in progress on entry will have completed | ||
1426 | * before this primitive returns. However, this does not guarantee that | ||
1427 | * softirq handlers will have completed, since in some kernels, these | ||
1428 | * handlers can run in process context, and can block. | ||
1429 | * | ||
1430 | * This primitive provides the guarantees made by the (now removed) | ||
1431 | * synchronize_kernel() API. In contrast, synchronize_rcu() only | ||
1432 | * guarantees that rcu_read_lock() sections will have completed. | ||
1433 | * In "classic RCU", these two guarantees happen to be one and | ||
1434 | * the same, but can differ in realtime RCU implementations. | ||
1435 | */ | ||
1436 | void synchronize_sched(void) | ||
1437 | { | ||
1438 | struct rcu_synchronize rcu; | ||
1439 | |||
1440 | if (rcu_blocking_is_gp()) | ||
1441 | return; | ||
1442 | |||
1443 | init_completion(&rcu.completion); | ||
1444 | /* Will wake me after RCU finished. */ | ||
1445 | call_rcu_sched(&rcu.head, wakeme_after_rcu); | ||
1446 | /* Wait for it. */ | ||
1447 | wait_for_completion(&rcu.completion); | ||
1448 | } | ||
1449 | EXPORT_SYMBOL_GPL(synchronize_sched); | ||
1450 | |||
1451 | /** | ||
1452 | * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. | ||
1453 | * | ||
1454 | * Control will return to the caller some time after a full rcu_bh grace | ||
1455 | * period has elapsed, in other words after all currently executing rcu_bh | ||
1456 | * read-side critical sections have completed. RCU read-side critical | ||
1457 | * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(), | ||
1458 | * and may be nested. | ||
1459 | */ | ||
1460 | void synchronize_rcu_bh(void) | ||
1461 | { | ||
1462 | struct rcu_synchronize rcu; | ||
1463 | |||
1464 | if (rcu_blocking_is_gp()) | ||
1465 | return; | ||
1466 | |||
1467 | init_completion(&rcu.completion); | ||
1468 | /* Will wake me after RCU finished. */ | ||
1469 | call_rcu_bh(&rcu.head, wakeme_after_rcu); | ||
1470 | /* Wait for it. */ | ||
1471 | wait_for_completion(&rcu.completion); | ||
1472 | } | ||
1473 | EXPORT_SYMBOL_GPL(synchronize_rcu_bh); | ||
1474 | |||
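
Both synchronize_sched() and synchronize_rcu_bh() above are built the same way: queue a callback whose only job is to complete a completion, then block until that callback runs after the grace period. A userspace model of that shape, using pthreads in place of the kernel's completion and a plain thread in place of the callback machinery (so the "grace period" here is purely pretend), could read:

    /* Userspace model of the synchronize_*() pattern: queue a callback that
     * completes a completion, then block until it has run.  Everything here
     * is a stand-in; nothing below is kernel API. */
    #include <pthread.h>
    #include <stdio.h>

    struct completion {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        int done;
    };

    static void init_completion(struct completion *c)
    {
        pthread_mutex_init(&c->lock, NULL);
        pthread_cond_init(&c->cond, NULL);
        c->done = 0;
    }

    static void complete(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
    }

    static void wait_for_completion(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        while (!c->done)
            pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
    }

    static struct completion rcu;

    /* Plays the role of wakeme_after_rcu() running after the grace period. */
    static void *pretend_grace_period(void *arg)
    {
        (void)arg;
        complete(&rcu);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        init_completion(&rcu);
        pthread_create(&t, NULL, pretend_grace_period, NULL); /* "call_rcu" */
        wait_for_completion(&rcu);                            /* wait for it */
        pthread_join(t, NULL);
        puts("pretend grace period has elapsed");
        return 0;
    }
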
1237 | /* | 1475 | /* |
1238 | * Check to see if there is any immediate RCU-related work to be done | 1476 | * Check to see if there is any immediate RCU-related work to be done |
1239 | * by the current CPU, for the specified type of RCU, returning 1 if so. | 1477 | * by the current CPU, for the specified type of RCU, returning 1 if so. |
@@ -1243,38 +1481,52 @@ EXPORT_SYMBOL_GPL(call_rcu_bh); | |||
1243 | */ | 1481 | */ |
1244 | static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) | 1482 | static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) |
1245 | { | 1483 | { |
1484 | struct rcu_node *rnp = rdp->mynode; | ||
1485 | |||
1246 | rdp->n_rcu_pending++; | 1486 | rdp->n_rcu_pending++; |
1247 | 1487 | ||
1248 | /* Check for CPU stalls, if enabled. */ | 1488 | /* Check for CPU stalls, if enabled. */ |
1249 | check_cpu_stall(rsp, rdp); | 1489 | check_cpu_stall(rsp, rdp); |
1250 | 1490 | ||
1251 | /* Is the RCU core waiting for a quiescent state from this CPU? */ | 1491 | /* Is the RCU core waiting for a quiescent state from this CPU? */ |
1252 | if (rdp->qs_pending) | 1492 | if (rdp->qs_pending) { |
1493 | rdp->n_rp_qs_pending++; | ||
1253 | return 1; | 1494 | return 1; |
1495 | } | ||
1254 | 1496 | ||
1255 | /* Does this CPU have callbacks ready to invoke? */ | 1497 | /* Does this CPU have callbacks ready to invoke? */ |
1256 | if (cpu_has_callbacks_ready_to_invoke(rdp)) | 1498 | if (cpu_has_callbacks_ready_to_invoke(rdp)) { |
1499 | rdp->n_rp_cb_ready++; | ||
1257 | return 1; | 1500 | return 1; |
1501 | } | ||
1258 | 1502 | ||
1259 | /* Has RCU gone idle with this CPU needing another grace period? */ | 1503 | /* Has RCU gone idle with this CPU needing another grace period? */ |
1260 | if (cpu_needs_another_gp(rsp, rdp)) | 1504 | if (cpu_needs_another_gp(rsp, rdp)) { |
1505 | rdp->n_rp_cpu_needs_gp++; | ||
1261 | return 1; | 1506 | return 1; |
1507 | } | ||
1262 | 1508 | ||
1263 | /* Has another RCU grace period completed? */ | 1509 | /* Has another RCU grace period completed? */ |
1264 | if (ACCESS_ONCE(rsp->completed) != rdp->completed) /* outside of lock */ | 1510 | if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */ |
1511 | rdp->n_rp_gp_completed++; | ||
1265 | return 1; | 1512 | return 1; |
1513 | } | ||
1266 | 1514 | ||
1267 | /* Has a new RCU grace period started? */ | 1515 | /* Has a new RCU grace period started? */ |
1268 | if (ACCESS_ONCE(rsp->gpnum) != rdp->gpnum) /* outside of lock */ | 1516 | if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */ |
1517 | rdp->n_rp_gp_started++; | ||
1269 | return 1; | 1518 | return 1; |
1519 | } | ||
1270 | 1520 | ||
1271 | /* Has an RCU GP gone long enough to send resched IPIs &c? */ | 1521 | /* Has an RCU GP gone long enough to send resched IPIs &c? */ |
1272 | if (ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum) && | 1522 | if (rcu_gp_in_progress(rsp) && |
1273 | ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 || | 1523 | ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)) { |
1274 | (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0)) | 1524 | rdp->n_rp_need_fqs++; |
1275 | return 1; | 1525 | return 1; |
1526 | } | ||
1276 | 1527 | ||
1277 | /* nothing to do */ | 1528 | /* nothing to do */ |
1529 | rdp->n_rp_need_nothing++; | ||
1278 | return 0; | 1530 | return 0; |
1279 | } | 1531 | } |
1280 | 1532 | ||
@@ -1283,10 +1535,11 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) | |||
1283 | * by the current CPU, returning 1 if so. This function is part of the | 1535 | * by the current CPU, returning 1 if so. This function is part of the |
1284 | * RCU implementation; it is -not- an exported member of the RCU API. | 1536 | * RCU implementation; it is -not- an exported member of the RCU API. |
1285 | */ | 1537 | */ |
1286 | int rcu_pending(int cpu) | 1538 | static int rcu_pending(int cpu) |
1287 | { | 1539 | { |
1288 | return __rcu_pending(&rcu_state, &per_cpu(rcu_data, cpu)) || | 1540 | return __rcu_pending(&rcu_sched_state, &per_cpu(rcu_sched_data, cpu)) || |
1289 | __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)); | 1541 | __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)) || |
1542 | rcu_preempt_pending(cpu); | ||
1290 | } | 1543 | } |
1291 | 1544 | ||
1292 | /* | 1545 | /* |
@@ -1298,51 +1551,150 @@ int rcu_pending(int cpu) | |||
1298 | int rcu_needs_cpu(int cpu) | 1551 | int rcu_needs_cpu(int cpu) |
1299 | { | 1552 | { |
1300 | /* RCU callbacks either ready or pending? */ | 1553 | /* RCU callbacks either ready or pending? */ |
1301 | return per_cpu(rcu_data, cpu).nxtlist || | 1554 | return per_cpu(rcu_sched_data, cpu).nxtlist || |
1302 | per_cpu(rcu_bh_data, cpu).nxtlist; | 1555 | per_cpu(rcu_bh_data, cpu).nxtlist || |
1556 | rcu_preempt_needs_cpu(cpu); | ||
1303 | } | 1557 | } |
1304 | 1558 | ||
1305 | /* | 1559 | /* |
1306 | * Initialize a CPU's per-CPU RCU data. We take this "scorched earth" | 1560 | * This function is invoked towards the end of the scheduler's initialization |
1307 | * approach so that we don't have to worry about how long the CPU has | 1561 | * process. Before this is called, the idle task might contain |
1308 | * been gone, or whether it ever was online previously. We do trust the | 1562 | * RCU read-side critical sections (during which time, this idle |
1309 | * ->mynode field, as it is constant for a given struct rcu_data and | 1563 | * task is booting the system). After this function is called, the |
1310 | * initialized during early boot. | 1564 | * idle tasks are prohibited from containing RCU read-side critical |
1311 | * | 1565 | * sections. |
1312 | * Note that only one online or offline event can be happening at a given | ||
1313 | * time. Note also that we can accept some slop in the rsp->completed | ||
1314 | * access due to the fact that this CPU cannot possibly have any RCU | ||
1315 | * callbacks in flight yet. | ||
1316 | */ | 1566 | */ |
1317 | static void __cpuinit | 1567 | void rcu_scheduler_starting(void) |
1318 | rcu_init_percpu_data(int cpu, struct rcu_state *rsp) | 1568 | { |
1569 | WARN_ON(num_online_cpus() != 1); | ||
1570 | WARN_ON(nr_context_switches() > 0); | ||
1571 | rcu_scheduler_active = 1; | ||
1572 | } | ||
1573 | |||
1574 | static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL}; | ||
1575 | static atomic_t rcu_barrier_cpu_count; | ||
1576 | static DEFINE_MUTEX(rcu_barrier_mutex); | ||
1577 | static struct completion rcu_barrier_completion; | ||
1578 | |||
1579 | static void rcu_barrier_callback(struct rcu_head *notused) | ||
1580 | { | ||
1581 | if (atomic_dec_and_test(&rcu_barrier_cpu_count)) | ||
1582 | complete(&rcu_barrier_completion); | ||
1583 | } | ||
1584 | |||
1585 | /* | ||
1586 | * Called with preemption disabled, and from cross-cpu IRQ context. | ||
1587 | */ | ||
1588 | static void rcu_barrier_func(void *type) | ||
1589 | { | ||
1590 | int cpu = smp_processor_id(); | ||
1591 | struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu); | ||
1592 | void (*call_rcu_func)(struct rcu_head *head, | ||
1593 | void (*func)(struct rcu_head *head)); | ||
1594 | |||
1595 | atomic_inc(&rcu_barrier_cpu_count); | ||
1596 | call_rcu_func = type; | ||
1597 | call_rcu_func(head, rcu_barrier_callback); | ||
1598 | } | ||
1599 | |||
1600 | /* | ||
1601 | * Orchestrate the specified type of RCU barrier, waiting for all | ||
1602 | * RCU callbacks of the specified type to complete. | ||
1603 | */ | ||
1604 | static void _rcu_barrier(struct rcu_state *rsp, | ||
1605 | void (*call_rcu_func)(struct rcu_head *head, | ||
1606 | void (*func)(struct rcu_head *head))) | ||
1607 | { | ||
1608 | BUG_ON(in_interrupt()); | ||
1609 | /* Take mutex to serialize concurrent rcu_barrier() requests. */ | ||
1610 | mutex_lock(&rcu_barrier_mutex); | ||
1611 | init_completion(&rcu_barrier_completion); | ||
1612 | /* | ||
1613 | * Initialize rcu_barrier_cpu_count to 1, then invoke | ||
1614 | * rcu_barrier_func() on each CPU, so that each CPU also has | ||
1615 | * incremented rcu_barrier_cpu_count. Only then is it safe to | ||
1616 | * decrement rcu_barrier_cpu_count -- otherwise the first CPU | ||
1617 | * might complete its grace period before all of the other CPUs | ||
1618 | * did their increment, causing this function to return too | ||
1619 | * early. | ||
1620 | */ | ||
1621 | atomic_set(&rcu_barrier_cpu_count, 1); | ||
1622 | preempt_disable(); /* stop CPU_DYING from filling orphan_cbs_list */ | ||
1623 | rcu_adopt_orphan_cbs(rsp); | ||
1624 | on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1); | ||
1625 | preempt_enable(); /* CPU_DYING can again fill orphan_cbs_list */ | ||
1626 | if (atomic_dec_and_test(&rcu_barrier_cpu_count)) | ||
1627 | complete(&rcu_barrier_completion); | ||
1628 | wait_for_completion(&rcu_barrier_completion); | ||
1629 | mutex_unlock(&rcu_barrier_mutex); | ||
1630 | } | ||
1631 | |||
1632 | /** | ||
1633 | * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete. | ||
1634 | */ | ||
1635 | void rcu_barrier_bh(void) | ||
1636 | { | ||
1637 | _rcu_barrier(&rcu_bh_state, call_rcu_bh); | ||
1638 | } | ||
1639 | EXPORT_SYMBOL_GPL(rcu_barrier_bh); | ||
1640 | |||
1641 | /** | ||
1642 | * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks. | ||
1643 | */ | ||
1644 | void rcu_barrier_sched(void) | ||
1645 | { | ||
1646 | _rcu_barrier(&rcu_sched_state, call_rcu_sched); | ||
1647 | } | ||
1648 | EXPORT_SYMBOL_GPL(rcu_barrier_sched); | ||
1649 | |||
1650 | /* | ||
1651 | * Do boot-time initialization of a CPU's per-CPU RCU data. | ||
1652 | */ | ||
1653 | static void __init | ||
1654 | rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) | ||
1319 | { | 1655 | { |
1320 | unsigned long flags; | 1656 | unsigned long flags; |
1321 | int i; | 1657 | int i; |
1322 | long lastcomp; | ||
1323 | unsigned long mask; | ||
1324 | struct rcu_data *rdp = rsp->rda[cpu]; | 1658 | struct rcu_data *rdp = rsp->rda[cpu]; |
1325 | struct rcu_node *rnp = rcu_get_root(rsp); | 1659 | struct rcu_node *rnp = rcu_get_root(rsp); |
1326 | 1660 | ||
1327 | /* Set up local state, ensuring consistent view of global state. */ | 1661 | /* Set up local state, ensuring consistent view of global state. */ |
1328 | spin_lock_irqsave(&rnp->lock, flags); | 1662 | spin_lock_irqsave(&rnp->lock, flags); |
1329 | lastcomp = rsp->completed; | ||
1330 | rdp->completed = lastcomp; | ||
1331 | rdp->gpnum = lastcomp; | ||
1332 | rdp->passed_quiesc = 0; /* We could be racing with new GP, */ | ||
1333 | rdp->qs_pending = 1; /* so set up to respond to current GP. */ | ||
1334 | rdp->beenonline = 1; /* We have now been online. */ | ||
1335 | rdp->passed_quiesc_completed = lastcomp - 1; | ||
1336 | rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo); | 1663 | rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo); |
1337 | rdp->nxtlist = NULL; | 1664 | rdp->nxtlist = NULL; |
1338 | for (i = 0; i < RCU_NEXT_SIZE; i++) | 1665 | for (i = 0; i < RCU_NEXT_SIZE; i++) |
1339 | rdp->nxttail[i] = &rdp->nxtlist; | 1666 | rdp->nxttail[i] = &rdp->nxtlist; |
1340 | rdp->qlen = 0; | 1667 | rdp->qlen = 0; |
1341 | rdp->blimit = blimit; | ||
1342 | #ifdef CONFIG_NO_HZ | 1668 | #ifdef CONFIG_NO_HZ |
1343 | rdp->dynticks = &per_cpu(rcu_dynticks, cpu); | 1669 | rdp->dynticks = &per_cpu(rcu_dynticks, cpu); |
1344 | #endif /* #ifdef CONFIG_NO_HZ */ | 1670 | #endif /* #ifdef CONFIG_NO_HZ */ |
1345 | rdp->cpu = cpu; | 1671 | rdp->cpu = cpu; |
1672 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
1673 | } | ||
1674 | |||
1675 | /* | ||
1676 | * Initialize a CPU's per-CPU RCU data. Note that only one online or | ||
1677 | * offline event can be happening at a given time. Note also that we | ||
1678 | * can accept some slop in the rsp->completed access due to the fact | ||
1679 | * that this CPU cannot possibly have any RCU callbacks in flight yet. | ||
1680 | */ | ||
1681 | static void __cpuinit | ||
1682 | rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable) | ||
1683 | { | ||
1684 | unsigned long flags; | ||
1685 | unsigned long mask; | ||
1686 | struct rcu_data *rdp = rsp->rda[cpu]; | ||
1687 | struct rcu_node *rnp = rcu_get_root(rsp); | ||
1688 | |||
1689 | /* Set up local state, ensuring consistent view of global state. */ | ||
1690 | spin_lock_irqsave(&rnp->lock, flags); | ||
1691 | rdp->passed_quiesc = 0; /* We could be racing with new GP, */ | ||
1692 | rdp->qs_pending = 1; /* so set up to respond to current GP. */ | ||
1693 | rdp->beenonline = 1; /* We have now been online. */ | ||
1694 | rdp->preemptable = preemptable; | ||
1695 | rdp->qlen_last_fqs_check = 0; | ||
1696 | rdp->n_force_qs_snap = rsp->n_force_qs; | ||
1697 | rdp->blimit = blimit; | ||
1346 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | 1698 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
1347 | 1699 | ||
1348 | /* | 1700 | /* |
@@ -1361,38 +1713,30 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp) | |||
1361 | spin_lock(&rnp->lock); /* irqs already disabled. */ | 1713 | spin_lock(&rnp->lock); /* irqs already disabled. */ |
1362 | rnp->qsmaskinit |= mask; | 1714 | rnp->qsmaskinit |= mask; |
1363 | mask = rnp->grpmask; | 1715 | mask = rnp->grpmask; |
1716 | if (rnp == rdp->mynode) { | ||
1717 | rdp->gpnum = rnp->completed; /* if GP in progress... */ | ||
1718 | rdp->completed = rnp->completed; | ||
1719 | rdp->passed_quiesc_completed = rnp->completed - 1; | ||
1720 | } | ||
1364 | spin_unlock(&rnp->lock); /* irqs already disabled. */ | 1721 | spin_unlock(&rnp->lock); /* irqs already disabled. */ |
1365 | rnp = rnp->parent; | 1722 | rnp = rnp->parent; |
1366 | } while (rnp != NULL && !(rnp->qsmaskinit & mask)); | 1723 | } while (rnp != NULL && !(rnp->qsmaskinit & mask)); |
1367 | 1724 | ||
1368 | spin_unlock(&rsp->onofflock); /* irqs remain disabled. */ | 1725 | spin_unlock_irqrestore(&rsp->onofflock, flags); |
1369 | |||
1370 | /* | ||
1371 | * A new grace period might start here. If so, we will be part of | ||
1372 | * it, and its gpnum will be greater than ours, so we will | ||
1373 | * participate. It is also possible for the gpnum to have been | ||
1374 | * incremented before this function was called, and the bitmasks | ||
1375 | * to not be filled out until now, in which case we will also | ||
1376 | * participate due to our gpnum being behind. | ||
1377 | */ | ||
1378 | |||
1379 | /* Since it is coming online, the CPU is in a quiescent state. */ | ||
1380 | cpu_quiet(cpu, rsp, rdp, lastcomp); | ||
1381 | local_irq_restore(flags); | ||
1382 | } | 1726 | } |
1383 | 1727 | ||
1384 | static void __cpuinit rcu_online_cpu(int cpu) | 1728 | static void __cpuinit rcu_online_cpu(int cpu) |
1385 | { | 1729 | { |
1386 | rcu_init_percpu_data(cpu, &rcu_state); | 1730 | rcu_init_percpu_data(cpu, &rcu_sched_state, 0); |
1387 | rcu_init_percpu_data(cpu, &rcu_bh_state); | 1731 | rcu_init_percpu_data(cpu, &rcu_bh_state, 0); |
1388 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); | 1732 | rcu_preempt_init_percpu_data(cpu); |
1389 | } | 1733 | } |
1390 | 1734 | ||
1391 | /* | 1735 | /* |
1392 | * Handle CPU online/offline notifcation events. | 1736 | * Handle CPU online/offline notification events. |
1393 | */ | 1737 | */ |
1394 | static int __cpuinit rcu_cpu_notify(struct notifier_block *self, | 1738 | static int __cpuinit rcu_cpu_notify(struct notifier_block *self, |
1395 | unsigned long action, void *hcpu) | 1739 | unsigned long action, void *hcpu) |
1396 | { | 1740 | { |
1397 | long cpu = (long)hcpu; | 1741 | long cpu = (long)hcpu; |
1398 | 1742 | ||
@@ -1401,6 +1745,22 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self, | |||
1401 | case CPU_UP_PREPARE_FROZEN: | 1745 | case CPU_UP_PREPARE_FROZEN: |
1402 | rcu_online_cpu(cpu); | 1746 | rcu_online_cpu(cpu); |
1403 | break; | 1747 | break; |
1748 | case CPU_DYING: | ||
1749 | case CPU_DYING_FROZEN: | ||
1750 | /* | ||
1751 | * preempt_disable() in _rcu_barrier() prevents stop_machine(), | ||
1752 | * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);" | ||
1753 | * returns, all online cpus have queued rcu_barrier_func(). | ||
1754 | * The dying CPU clears its cpu_online_mask bit and | ||
1755 | * moves all of its RCU callbacks to ->orphan_cbs_list | ||
1756 | * in the context of stop_machine(), so subsequent calls | ||
1757 | * to _rcu_barrier() will adopt these callbacks and only | ||
1758 | * then queue rcu_barrier_func() on all remaining CPUs. | ||
1759 | */ | ||
1760 | rcu_send_cbs_to_orphanage(&rcu_bh_state); | ||
1761 | rcu_send_cbs_to_orphanage(&rcu_sched_state); | ||
1762 | rcu_preempt_send_cbs_to_orphanage(); | ||
1763 | break; | ||
1404 | case CPU_DEAD: | 1764 | case CPU_DEAD: |
1405 | case CPU_DEAD_FROZEN: | 1765 | case CPU_DEAD_FROZEN: |
1406 | case CPU_UP_CANCELED: | 1766 | case CPU_UP_CANCELED: |
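
The orphanage scheme referenced in the CPU_DYING comment above relies on per-CPU callback lists being kept with a tail pointer, so a dying CPU's entire list can be spliced onto the global orphan list in O(1) and adopted later the same way. A small sketch of that splice, with invented structures and no locking:

    /* Sketch of splicing a tail-pointer callback list onto an orphan list. */
    #include <stddef.h>
    #include <stdio.h>

    struct cb {
        struct cb *next;
        int id;
    };

    struct cblist {
        struct cb *head;
        struct cb **tail;           /* points at the terminating NULL slot */
        long qlen;
    };

    static void cblist_init(struct cblist *l)
    {
        l->head = NULL;
        l->tail = &l->head;
        l->qlen = 0;
    }

    static void cblist_enqueue(struct cblist *l, struct cb *c)
    {
        c->next = NULL;
        *l->tail = c;
        l->tail = &c->next;
        l->qlen++;
    }

    /* Move every callback from a dying CPU's list onto the orphan list. */
    static void cblist_splice(struct cblist *orphans, struct cblist *dying)
    {
        if (dying->head == NULL)
            return;
        *orphans->tail = dying->head;
        orphans->tail = dying->tail;
        orphans->qlen += dying->qlen;
        cblist_init(dying);
    }

    int main(void)
    {
        struct cblist cpu, orphans;
        struct cb a = { .id = 1 }, b = { .id = 2 };

        cblist_init(&cpu);
        cblist_init(&orphans);
        cblist_enqueue(&cpu, &a);
        cblist_enqueue(&cpu, &b);
        cblist_splice(&orphans, &cpu);
        printf("orphan list now holds %ld callbacks\n", orphans.qlen);
        return 0;
    }
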
@@ -1464,6 +1824,8 @@ static void __init rcu_init_one(struct rcu_state *rsp) | |||
1464 | rnp = rsp->level[i]; | 1824 | rnp = rsp->level[i]; |
1465 | for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) { | 1825 | for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) { |
1466 | spin_lock_init(&rnp->lock); | 1826 | spin_lock_init(&rnp->lock); |
1827 | lockdep_set_class(&rnp->lock, &rcu_node_class[i]); | ||
1828 | rnp->gpnum = 0; | ||
1467 | rnp->qsmask = 0; | 1829 | rnp->qsmask = 0; |
1468 | rnp->qsmaskinit = 0; | 1830 | rnp->qsmaskinit = 0; |
1469 | rnp->grplo = j * cpustride; | 1831 | rnp->grplo = j * cpustride; |
@@ -1481,16 +1843,26 @@ static void __init rcu_init_one(struct rcu_state *rsp) | |||
1481 | j / rsp->levelspread[i - 1]; | 1843 | j / rsp->levelspread[i - 1]; |
1482 | } | 1844 | } |
1483 | rnp->level = i; | 1845 | rnp->level = i; |
1846 | INIT_LIST_HEAD(&rnp->blocked_tasks[0]); | ||
1847 | INIT_LIST_HEAD(&rnp->blocked_tasks[1]); | ||
1848 | INIT_LIST_HEAD(&rnp->blocked_tasks[2]); | ||
1849 | INIT_LIST_HEAD(&rnp->blocked_tasks[3]); | ||
1484 | } | 1850 | } |
1485 | } | 1851 | } |
1486 | } | 1852 | } |
1487 | 1853 | ||
1488 | /* | 1854 | /* |
1489 | * Helper macro for __rcu_init(). To be used nowhere else! | 1855 | * Helper macro for __rcu_init() and __rcu_init_preempt(). To be used |
1490 | * Assigns leaf node pointers into each CPU's rcu_data structure. | 1856 | * nowhere else! Assigns leaf node pointers into each CPU's rcu_data |
1857 | * structure. | ||
1491 | */ | 1858 | */ |
1492 | #define RCU_DATA_PTR_INIT(rsp, rcu_data) \ | 1859 | #define RCU_INIT_FLAVOR(rsp, rcu_data) \ |
1493 | do { \ | 1860 | do { \ |
1861 | int i; \ | ||
1862 | int j; \ | ||
1863 | struct rcu_node *rnp; \ | ||
1864 | \ | ||
1865 | rcu_init_one(rsp); \ | ||
1494 | rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \ | 1866 | rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \ |
1495 | j = 0; \ | 1867 | j = 0; \ |
1496 | for_each_possible_cpu(i) { \ | 1868 | for_each_possible_cpu(i) { \ |
@@ -1498,35 +1870,34 @@ do { \ | |||
1498 | j++; \ | 1870 | j++; \ |
1499 | per_cpu(rcu_data, i).mynode = &rnp[j]; \ | 1871 | per_cpu(rcu_data, i).mynode = &rnp[j]; \ |
1500 | (rsp)->rda[i] = &per_cpu(rcu_data, i); \ | 1872 | (rsp)->rda[i] = &per_cpu(rcu_data, i); \ |
1873 | rcu_boot_init_percpu_data(i, rsp); \ | ||
1501 | } \ | 1874 | } \ |
1502 | } while (0) | 1875 | } while (0) |
1503 | 1876 | ||
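
RCU_INIT_FLAVOR's per-CPU loop walks the CPUs in order and advances to the next leaf node whenever the current leaf's grplo..grphi range is exhausted, so every CPU's rcu_data ends up with a ->mynode pointer into the leaf level of the tree. A toy version of that mapping with a fixed, invented geometry:

    /* Toy version of the CPU-to-leaf assignment done by RCU_INIT_FLAVOR. */
    #include <stdio.h>

    #define NLEAVES 2
    #define NCPUS   8

    struct leaf { int grplo, grphi; };

    int main(void)
    {
        struct leaf leaves[NLEAVES] = { { 0, 3 }, { 4, 7 } };
        int mynode[NCPUS];
        int i, j = 0;

        for (i = 0; i < NCPUS; i++) {
            if (i > leaves[j].grphi)    /* current leaf exhausted, move on */
                j++;
            mynode[i] = j;
            printf("cpu %d -> leaf node %d\n", i, mynode[i]);
        }
        return 0;
    }
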
1504 | static struct notifier_block __cpuinitdata rcu_nb = { | 1877 | void __init rcu_init(void) |
1505 | .notifier_call = rcu_cpu_notify, | ||
1506 | }; | ||
1507 | |||
1508 | void __init __rcu_init(void) | ||
1509 | { | 1878 | { |
1510 | int i; /* All used by RCU_DATA_PTR_INIT(). */ | 1879 | int i; |
1511 | int j; | ||
1512 | struct rcu_node *rnp; | ||
1513 | 1880 | ||
1514 | printk(KERN_WARNING "Experimental hierarchical RCU implementation.\n"); | 1881 | rcu_bootup_announce(); |
1515 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | 1882 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR |
1516 | printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n"); | 1883 | printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n"); |
1517 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | 1884 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ |
1518 | rcu_init_one(&rcu_state); | 1885 | #if NUM_RCU_LVL_4 != 0 |
1519 | RCU_DATA_PTR_INIT(&rcu_state, rcu_data); | 1886 | printk(KERN_INFO "Experimental four-level hierarchy is enabled.\n"); |
1520 | rcu_init_one(&rcu_bh_state); | 1887 | #endif /* #if NUM_RCU_LVL_4 != 0 */ |
1521 | RCU_DATA_PTR_INIT(&rcu_bh_state, rcu_bh_data); | 1888 | RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data); |
1889 | RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data); | ||
1890 | __rcu_init_preempt(); | ||
1891 | open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); | ||
1522 | 1892 | ||
1893 | /* | ||
1894 | * We don't need protection against CPU-hotplug here because | ||
1895 | * this is called early in boot, before either interrupts | ||
1896 | * or the scheduler are operational. | ||
1897 | */ | ||
1898 | cpu_notifier(rcu_cpu_notify, 0); | ||
1523 | for_each_online_cpu(i) | 1899 | for_each_online_cpu(i) |
1524 | rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long)i); | 1900 | rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)i); |
1525 | /* Register notifier for non-boot CPUs */ | ||
1526 | register_cpu_notifier(&rcu_nb); | ||
1527 | printk(KERN_WARNING "Experimental hierarchical RCU init done.\n"); | ||
1528 | } | 1901 | } |
1529 | 1902 | ||
1530 | module_param(blimit, int, 0); | 1903 | #include "rcutree_plugin.h" |
1531 | module_param(qhimark, int, 0); | ||
1532 | module_param(qlowmark, int, 0); | ||