Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--  kernel/rcutree_plugin.h  1192
1 files changed, 1040 insertions, 152 deletions
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 0e4f420245d9..8aafbb80b8b0 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Read-Copy Update mechanism for mutual exclusion (tree-based version) 2 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
3 * Internal non-public definitions that provide either classic 3 * Internal non-public definitions that provide either classic
4 * or preemptable semantics. 4 * or preemptible semantics.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -25,6 +25,7 @@
25 */ 25 */
26 26
27#include <linux/delay.h> 27#include <linux/delay.h>
28#include <linux/stop_machine.h>
28 29
29/* 30/*
30 * Check the RCU kernel configuration parameters and print informative 31 * Check the RCU kernel configuration parameters and print informative
@@ -53,11 +54,7 @@ static void __init rcu_bootup_announce_oddness(void)
53#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE 54#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
54 printk(KERN_INFO "\tRCU torture testing starts during boot.\n"); 55 printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
55#endif 56#endif
56#ifndef CONFIG_RCU_CPU_STALL_DETECTOR 57#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
57 printk(KERN_INFO
58 "\tRCU-based detection of stalled CPUs is disabled.\n");
59#endif
60#ifndef CONFIG_RCU_CPU_STALL_VERBOSE
61 printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n"); 58 printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n");
62#endif 59#endif
63#if NUM_RCU_LVL_4 != 0 60#if NUM_RCU_LVL_4 != 0
@@ -69,7 +66,9 @@ static void __init rcu_bootup_announce_oddness(void)
69 66
70struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state); 67struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
71DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data); 68DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
69static struct rcu_state *rcu_state = &rcu_preempt_state;
72 70
71static void rcu_read_unlock_special(struct task_struct *t);
73static int rcu_preempted_readers_exp(struct rcu_node *rnp); 72static int rcu_preempted_readers_exp(struct rcu_node *rnp);
74 73
75/* 74/*
@@ -77,7 +76,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp);
77 */ 76 */
78static void __init rcu_bootup_announce(void) 77static void __init rcu_bootup_announce(void)
79{ 78{
80 printk(KERN_INFO "Preemptable hierarchical RCU implementation.\n"); 79 printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
81 rcu_bootup_announce_oddness(); 80 rcu_bootup_announce_oddness();
82} 81}
83 82
@@ -110,7 +109,7 @@ void rcu_force_quiescent_state(void)
110EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); 109EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
111 110
112/* 111/*
113 * Record a preemptable-RCU quiescent state for the specified CPU. Note 112 * Record a preemptible-RCU quiescent state for the specified CPU. Note
114 * that this just means that the task currently running on the CPU is 113 * that this just means that the task currently running on the CPU is
115 * not in a quiescent state. There might be any number of tasks blocked 114 * not in a quiescent state. There might be any number of tasks blocked
116 * while in an RCU read-side critical section. 115 * while in an RCU read-side critical section.
@@ -133,12 +132,12 @@ static void rcu_preempt_qs(int cpu)
133 * We have entered the scheduler, and the current task might soon be 132 * We have entered the scheduler, and the current task might soon be
134 * context-switched away from. If this task is in an RCU read-side 133 * context-switched away from. If this task is in an RCU read-side
135 * critical section, we will no longer be able to rely on the CPU to 134 * critical section, we will no longer be able to rely on the CPU to
136 * record that fact, so we enqueue the task on the appropriate entry 135 * record that fact, so we enqueue the task on the blkd_tasks list.
137 * of the blocked_tasks[] array. The task will dequeue itself when 136 * The task will dequeue itself when it exits the outermost enclosing
138 * it exits the outermost enclosing RCU read-side critical section. 137 * RCU read-side critical section. Therefore, the current grace period
139 * Therefore, the current grace period cannot be permitted to complete 138 * cannot be permitted to complete until the blkd_tasks list entries
140 * until the blocked_tasks[] entry indexed by the low-order bit of 139 * predating the current grace period drain, in other words, until
141 * rnp->gpnum empties. 140 * rnp->gp_tasks becomes NULL.
142 * 141 *
143 * Caller must disable preemption. 142 * Caller must disable preemption.
144 */ 143 */
@@ -146,15 +145,14 @@ static void rcu_preempt_note_context_switch(int cpu)
146{ 145{
147 struct task_struct *t = current; 146 struct task_struct *t = current;
148 unsigned long flags; 147 unsigned long flags;
149 int phase;
150 struct rcu_data *rdp; 148 struct rcu_data *rdp;
151 struct rcu_node *rnp; 149 struct rcu_node *rnp;
152 150
153 if (t->rcu_read_lock_nesting && 151 if (t->rcu_read_lock_nesting > 0 &&
154 (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) { 152 (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
155 153
156 /* Possibly blocking in an RCU read-side critical section. */ 154 /* Possibly blocking in an RCU read-side critical section. */
157 rdp = rcu_preempt_state.rda[cpu]; 155 rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
158 rnp = rdp->mynode; 156 rnp = rdp->mynode;
159 raw_spin_lock_irqsave(&rnp->lock, flags); 157 raw_spin_lock_irqsave(&rnp->lock, flags);
160 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED; 158 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
@@ -168,16 +166,39 @@ static void rcu_preempt_note_context_switch(int cpu)
168 * (i.e., this CPU has not yet passed through a quiescent 166 * (i.e., this CPU has not yet passed through a quiescent
169 * state for the current grace period), then as long 167 * state for the current grace period), then as long
170 * as that task remains queued, the current grace period 168 * as that task remains queued, the current grace period
171 * cannot end. 169 * cannot end. Note that there is some uncertainty as
170 * to exactly when the current grace period started.
171 * We take a conservative approach, which can result
172 * in unnecessarily waiting on tasks that started very
173 * slightly after the current grace period began. C'est
174 * la vie!!!
172 * 175 *
173 * But first, note that the current CPU must still be 176 * But first, note that the current CPU must still be
174 * on line! 177 * on line!
175 */ 178 */
176 WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0); 179 WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
177 WARN_ON_ONCE(!list_empty(&t->rcu_node_entry)); 180 WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
178 phase = (rnp->gpnum + !(rnp->qsmask & rdp->grpmask)) & 0x1; 181 if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
179 list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]); 182 list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
183 rnp->gp_tasks = &t->rcu_node_entry;
184#ifdef CONFIG_RCU_BOOST
185 if (rnp->boost_tasks != NULL)
186 rnp->boost_tasks = rnp->gp_tasks;
187#endif /* #ifdef CONFIG_RCU_BOOST */
188 } else {
189 list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
190 if (rnp->qsmask & rdp->grpmask)
191 rnp->gp_tasks = &t->rcu_node_entry;
192 }
180 raw_spin_unlock_irqrestore(&rnp->lock, flags); 193 raw_spin_unlock_irqrestore(&rnp->lock, flags);
194 } else if (t->rcu_read_lock_nesting < 0 &&
195 t->rcu_read_unlock_special) {
196
197 /*
198 * Complete exit from RCU read-side critical section on
199 * behalf of preempted instance of __rcu_read_unlock().
200 */
201 rcu_read_unlock_special(t);
181 } 202 }
182 203
183 /* 204 /*
@@ -195,13 +216,13 @@ static void rcu_preempt_note_context_switch(int cpu)
195} 216}
196 217
197/* 218/*
198 * Tree-preemptable RCU implementation for rcu_read_lock(). 219 * Tree-preemptible RCU implementation for rcu_read_lock().
199 * Just increment ->rcu_read_lock_nesting, shared state will be updated 220 * Just increment ->rcu_read_lock_nesting, shared state will be updated
200 * if we block. 221 * if we block.
201 */ 222 */
202void __rcu_read_lock(void) 223void __rcu_read_lock(void)
203{ 224{
204 ACCESS_ONCE(current->rcu_read_lock_nesting)++; 225 current->rcu_read_lock_nesting++;
205 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */ 226 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
206} 227}
207EXPORT_SYMBOL_GPL(__rcu_read_lock); 228EXPORT_SYMBOL_GPL(__rcu_read_lock);
@@ -211,12 +232,9 @@ EXPORT_SYMBOL_GPL(__rcu_read_lock);
211 * for the specified rcu_node structure. If the caller needs a reliable 232 * for the specified rcu_node structure. If the caller needs a reliable
212 * answer, it must hold the rcu_node's ->lock. 233 * answer, it must hold the rcu_node's ->lock.
213 */ 234 */
214static int rcu_preempted_readers(struct rcu_node *rnp) 235static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
215{ 236{
216 int phase = rnp->gpnum & 0x1; 237 return rnp->gp_tasks != NULL;
217
218 return !list_empty(&rnp->blocked_tasks[phase]) ||
219 !list_empty(&rnp->blocked_tasks[phase + 2]);
220} 238}
221 239
222/* 240/*
@@ -232,7 +250,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
232 unsigned long mask; 250 unsigned long mask;
233 struct rcu_node *rnp_p; 251 struct rcu_node *rnp_p;
234 252
235 if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) { 253 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
236 raw_spin_unlock_irqrestore(&rnp->lock, flags); 254 raw_spin_unlock_irqrestore(&rnp->lock, flags);
237 return; /* Still need more quiescent states! */ 255 return; /* Still need more quiescent states! */
238 } 256 }
@@ -256,15 +274,31 @@ static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
256} 274}
257 275
258/* 276/*
277 * Advance a ->blkd_tasks-list pointer to the next entry, instead
278 * returning NULL if at the end of the list.
279 */
280static struct list_head *rcu_next_node_entry(struct task_struct *t,
281 struct rcu_node *rnp)
282{
283 struct list_head *np;
284
285 np = t->rcu_node_entry.next;
286 if (np == &rnp->blkd_tasks)
287 np = NULL;
288 return np;
289}
290
291/*
259 * Handle special cases during rcu_read_unlock(), such as needing to 292 * Handle special cases during rcu_read_unlock(), such as needing to
260 * notify RCU core processing or task having blocked during the RCU 293 * notify RCU core processing or task having blocked during the RCU
261 * read-side critical section. 294 * read-side critical section.
262 */ 295 */
263static void rcu_read_unlock_special(struct task_struct *t) 296static noinline void rcu_read_unlock_special(struct task_struct *t)
264{ 297{
265 int empty; 298 int empty;
266 int empty_exp; 299 int empty_exp;
267 unsigned long flags; 300 unsigned long flags;
301 struct list_head *np;
268 struct rcu_node *rnp; 302 struct rcu_node *rnp;
269 int special; 303 int special;
270 304
@@ -284,7 +318,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
284 } 318 }
285 319
286 /* Hardware IRQ handlers cannot block. */ 320 /* Hardware IRQ handlers cannot block. */
287 if (in_irq()) { 321 if (in_irq() || in_serving_softirq()) {
288 local_irq_restore(flags); 322 local_irq_restore(flags);
289 return; 323 return;
290 } 324 }
@@ -305,10 +339,24 @@ static void rcu_read_unlock_special(struct task_struct *t)
305 break; 339 break;
306 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ 340 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
307 } 341 }
308 empty = !rcu_preempted_readers(rnp); 342 empty = !rcu_preempt_blocked_readers_cgp(rnp);
309 empty_exp = !rcu_preempted_readers_exp(rnp); 343 empty_exp = !rcu_preempted_readers_exp(rnp);
310 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */ 344 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
345 np = rcu_next_node_entry(t, rnp);
311 list_del_init(&t->rcu_node_entry); 346 list_del_init(&t->rcu_node_entry);
347 if (&t->rcu_node_entry == rnp->gp_tasks)
348 rnp->gp_tasks = np;
349 if (&t->rcu_node_entry == rnp->exp_tasks)
350 rnp->exp_tasks = np;
351#ifdef CONFIG_RCU_BOOST
352 if (&t->rcu_node_entry == rnp->boost_tasks)
353 rnp->boost_tasks = np;
354 /* Snapshot and clear ->rcu_boosted with rcu_node lock held. */
355 if (t->rcu_boosted) {
356 special |= RCU_READ_UNLOCK_BOOSTED;
357 t->rcu_boosted = 0;
358 }
359#endif /* #ifdef CONFIG_RCU_BOOST */
312 t->rcu_blocked_node = NULL; 360 t->rcu_blocked_node = NULL;
313 361
314 /* 362 /*
@@ -321,6 +369,14 @@ static void rcu_read_unlock_special(struct task_struct *t)
321 else 369 else
322 rcu_report_unblock_qs_rnp(rnp, flags); 370 rcu_report_unblock_qs_rnp(rnp, flags);
323 371
372#ifdef CONFIG_RCU_BOOST
373 /* Unboost if we were boosted. */
374 if (special & RCU_READ_UNLOCK_BOOSTED) {
375 rt_mutex_unlock(t->rcu_boost_mutex);
376 t->rcu_boost_mutex = NULL;
377 }
378#endif /* #ifdef CONFIG_RCU_BOOST */
379
324 /* 380 /*
325 * If this was the last task on the expedited lists, 381 * If this was the last task on the expedited lists,
326 * then we need to report up the rcu_node hierarchy. 382 * then we need to report up the rcu_node hierarchy.
@@ -333,7 +389,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
333} 389}
334 390
335/* 391/*
336 * Tree-preemptable RCU implementation for rcu_read_unlock(). 392 * Tree-preemptible RCU implementation for rcu_read_unlock().
337 * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost 393 * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
338 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then 394 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
339 * invoke rcu_read_unlock_special() to clean up after a context switch 395 * invoke rcu_read_unlock_special() to clean up after a context switch
@@ -344,17 +400,26 @@ void __rcu_read_unlock(void)
344 struct task_struct *t = current; 400 struct task_struct *t = current;
345 401
346 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */ 402 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
347 if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 && 403 if (t->rcu_read_lock_nesting != 1)
348 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special))) 404 --t->rcu_read_lock_nesting;
349 rcu_read_unlock_special(t); 405 else {
406 t->rcu_read_lock_nesting = INT_MIN;
407 barrier(); /* assign before ->rcu_read_unlock_special load */
408 if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
409 rcu_read_unlock_special(t);
410 barrier(); /* ->rcu_read_unlock_special load before assign */
411 t->rcu_read_lock_nesting = 0;
412 }
350#ifdef CONFIG_PROVE_LOCKING 413#ifdef CONFIG_PROVE_LOCKING
351 WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0); 414 {
415 int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
416
417 WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
418 }
352#endif /* #ifdef CONFIG_PROVE_LOCKING */ 419#endif /* #ifdef CONFIG_PROVE_LOCKING */
353} 420}
354EXPORT_SYMBOL_GPL(__rcu_read_unlock); 421EXPORT_SYMBOL_GPL(__rcu_read_unlock);
355 422
356#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
357
358#ifdef CONFIG_RCU_CPU_STALL_VERBOSE 423#ifdef CONFIG_RCU_CPU_STALL_VERBOSE
359 424
360/* 425/*
@@ -364,18 +429,16 @@ EXPORT_SYMBOL_GPL(__rcu_read_unlock);
364static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp) 429static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
365{ 430{
366 unsigned long flags; 431 unsigned long flags;
367 struct list_head *lp;
368 int phase;
369 struct task_struct *t; 432 struct task_struct *t;
370 433
371 if (rcu_preempted_readers(rnp)) { 434 if (!rcu_preempt_blocked_readers_cgp(rnp))
372 raw_spin_lock_irqsave(&rnp->lock, flags); 435 return;
373 phase = rnp->gpnum & 0x1; 436 raw_spin_lock_irqsave(&rnp->lock, flags);
374 lp = &rnp->blocked_tasks[phase]; 437 t = list_entry(rnp->gp_tasks,
375 list_for_each_entry(t, lp, rcu_node_entry) 438 struct task_struct, rcu_node_entry);
376 sched_show_task(t); 439 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
377 raw_spin_unlock_irqrestore(&rnp->lock, flags); 440 sched_show_task(t);
378 } 441 raw_spin_unlock_irqrestore(&rnp->lock, flags);
379} 442}
380 443
381/* 444/*
@@ -405,19 +468,25 @@ static void rcu_print_detail_task_stall(struct rcu_state *rsp)
405 */ 468 */
406static void rcu_print_task_stall(struct rcu_node *rnp) 469static void rcu_print_task_stall(struct rcu_node *rnp)
407{ 470{
408 struct list_head *lp;
409 int phase;
410 struct task_struct *t; 471 struct task_struct *t;
411 472
412 if (rcu_preempted_readers(rnp)) { 473 if (!rcu_preempt_blocked_readers_cgp(rnp))
413 phase = rnp->gpnum & 0x1; 474 return;
414 lp = &rnp->blocked_tasks[phase]; 475 t = list_entry(rnp->gp_tasks,
415 list_for_each_entry(t, lp, rcu_node_entry) 476 struct task_struct, rcu_node_entry);
416 printk(" P%d", t->pid); 477 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
417 } 478 printk(" P%d", t->pid);
418} 479}
419 480
420#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ 481/*
482 * Suppress preemptible RCU's CPU stall warnings by pushing the
483 * time of the next stall-warning message comfortably far into the
484 * future.
485 */
486static void rcu_preempt_stall_reset(void)
487{
488 rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
489}
421 490
422/* 491/*
423 * Check that the list of blocked tasks for the newly completed grace 492 * Check that the list of blocked tasks for the newly completed grace
@@ -425,10 +494,15 @@ static void rcu_print_task_stall(struct rcu_node *rnp)
425 * period that still has RCU readers blocked! This function must be 494 * period that still has RCU readers blocked! This function must be
426 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock 495 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
427 * must be held by the caller. 496 * must be held by the caller.
497 *
498 * Also, if there are blocked tasks on the list, they automatically
499 * block the newly created grace period, so set up ->gp_tasks accordingly.
428 */ 500 */
429static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) 501static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
430{ 502{
431 WARN_ON_ONCE(rcu_preempted_readers(rnp)); 503 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
504 if (!list_empty(&rnp->blkd_tasks))
505 rnp->gp_tasks = rnp->blkd_tasks.next;
432 WARN_ON_ONCE(rnp->qsmask); 506 WARN_ON_ONCE(rnp->qsmask);
433} 507}
434 508
@@ -452,50 +526,68 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
452 struct rcu_node *rnp, 526 struct rcu_node *rnp,
453 struct rcu_data *rdp) 527 struct rcu_data *rdp)
454{ 528{
455 int i;
456 struct list_head *lp; 529 struct list_head *lp;
457 struct list_head *lp_root; 530 struct list_head *lp_root;
458 int retval = 0; 531 int retval = 0;
459 struct rcu_node *rnp_root = rcu_get_root(rsp); 532 struct rcu_node *rnp_root = rcu_get_root(rsp);
460 struct task_struct *tp; 533 struct task_struct *t;
461 534
462 if (rnp == rnp_root) { 535 if (rnp == rnp_root) {
463 WARN_ONCE(1, "Last CPU thought to be offlined?"); 536 WARN_ONCE(1, "Last CPU thought to be offlined?");
464 return 0; /* Shouldn't happen: at least one CPU online. */ 537 return 0; /* Shouldn't happen: at least one CPU online. */
465 } 538 }
466 WARN_ON_ONCE(rnp != rdp->mynode && 539
467 (!list_empty(&rnp->blocked_tasks[0]) || 540 /* If we are on an internal node, complain bitterly. */
468 !list_empty(&rnp->blocked_tasks[1]) || 541 WARN_ON_ONCE(rnp != rdp->mynode);
469 !list_empty(&rnp->blocked_tasks[2]) ||
470 !list_empty(&rnp->blocked_tasks[3])));
471 542
472 /* 543 /*
473 * Move tasks up to root rcu_node. Rely on the fact that the 544 * Move tasks up to root rcu_node. Don't try to get fancy for
474 * root rcu_node can be at most one ahead of the rest of the 545 * this corner-case operation -- just put this node's tasks
475 * rcu_nodes in terms of gp_num value. This fact allows us to 546 * at the head of the root node's list, and update the root node's
476 * move the blocked_tasks[] array directly, element by element. 547 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
548 * if non-NULL. This might result in waiting for more tasks than
549 * absolutely necessary, but this is a good performance/complexity
550 * tradeoff.
477 */ 551 */
478 if (rcu_preempted_readers(rnp)) 552 if (rcu_preempt_blocked_readers_cgp(rnp))
479 retval |= RCU_OFL_TASKS_NORM_GP; 553 retval |= RCU_OFL_TASKS_NORM_GP;
480 if (rcu_preempted_readers_exp(rnp)) 554 if (rcu_preempted_readers_exp(rnp))
481 retval |= RCU_OFL_TASKS_EXP_GP; 555 retval |= RCU_OFL_TASKS_EXP_GP;
482 for (i = 0; i < 4; i++) { 556 lp = &rnp->blkd_tasks;
483 lp = &rnp->blocked_tasks[i]; 557 lp_root = &rnp_root->blkd_tasks;
484 lp_root = &rnp_root->blocked_tasks[i]; 558 while (!list_empty(lp)) {
485 while (!list_empty(lp)) { 559 t = list_entry(lp->next, typeof(*t), rcu_node_entry);
486 tp = list_entry(lp->next, typeof(*tp), rcu_node_entry); 560 raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
487 raw_spin_lock(&rnp_root->lock); /* irqs already disabled */ 561 list_del(&t->rcu_node_entry);
488 list_del(&tp->rcu_node_entry); 562 t->rcu_blocked_node = rnp_root;
489 tp->rcu_blocked_node = rnp_root; 563 list_add(&t->rcu_node_entry, lp_root);
490 list_add(&tp->rcu_node_entry, lp_root); 564 if (&t->rcu_node_entry == rnp->gp_tasks)
491 raw_spin_unlock(&rnp_root->lock); /* irqs remain disabled */ 565 rnp_root->gp_tasks = rnp->gp_tasks;
492 } 566 if (&t->rcu_node_entry == rnp->exp_tasks)
567 rnp_root->exp_tasks = rnp->exp_tasks;
568#ifdef CONFIG_RCU_BOOST
569 if (&t->rcu_node_entry == rnp->boost_tasks)
570 rnp_root->boost_tasks = rnp->boost_tasks;
571#endif /* #ifdef CONFIG_RCU_BOOST */
572 raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
493 } 573 }
574
575#ifdef CONFIG_RCU_BOOST
576 /* In case root is being boosted and leaf is not. */
577 raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
578 if (rnp_root->boost_tasks != NULL &&
579 rnp_root->boost_tasks != rnp_root->gp_tasks)
580 rnp_root->boost_tasks = rnp_root->gp_tasks;
581 raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
582#endif /* #ifdef CONFIG_RCU_BOOST */
583
584 rnp->gp_tasks = NULL;
585 rnp->exp_tasks = NULL;
494 return retval; 586 return retval;
495} 587}
496 588
497/* 589/*
498 * Do CPU-offline processing for preemptable RCU. 590 * Do CPU-offline processing for preemptible RCU.
499 */ 591 */
500static void rcu_preempt_offline_cpu(int cpu) 592static void rcu_preempt_offline_cpu(int cpu)
501{ 593{
@@ -519,12 +611,13 @@ static void rcu_preempt_check_callbacks(int cpu)
519 rcu_preempt_qs(cpu); 611 rcu_preempt_qs(cpu);
520 return; 612 return;
521 } 613 }
522 if (per_cpu(rcu_preempt_data, cpu).qs_pending) 614 if (t->rcu_read_lock_nesting > 0 &&
615 per_cpu(rcu_preempt_data, cpu).qs_pending)
523 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS; 616 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
524} 617}
525 618
526/* 619/*
527 * Process callbacks for preemptable RCU. 620 * Process callbacks for preemptible RCU.
528 */ 621 */
529static void rcu_preempt_process_callbacks(void) 622static void rcu_preempt_process_callbacks(void)
530{ 623{
@@ -532,8 +625,17 @@ static void rcu_preempt_process_callbacks(void)
532 &__get_cpu_var(rcu_preempt_data)); 625 &__get_cpu_var(rcu_preempt_data));
533} 626}
534 627
628#ifdef CONFIG_RCU_BOOST
629
630static void rcu_preempt_do_callbacks(void)
631{
632 rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
633}
634
635#endif /* #ifdef CONFIG_RCU_BOOST */
636
535/* 637/*
536 * Queue a preemptable-RCU callback for invocation after a grace period. 638 * Queue a preemptible-RCU callback for invocation after a grace period.
537 */ 639 */
538void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) 640void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
539{ 641{
@@ -546,9 +648,11 @@ EXPORT_SYMBOL_GPL(call_rcu);
546 * 648 *
547 * Control will return to the caller some time after a full grace 649 * Control will return to the caller some time after a full grace
548 * period has elapsed, in other words after all currently executing RCU 650 * period has elapsed, in other words after all currently executing RCU
549 * read-side critical sections have completed. RCU read-side critical 651 * read-side critical sections have completed. Note, however, that
550 * sections are delimited by rcu_read_lock() and rcu_read_unlock(), 652 * upon return from synchronize_rcu(), the caller might well be executing
551 * and may be nested. 653 * concurrently with new RCU read-side critical sections that began while
654 * synchronize_rcu() was waiting. RCU read-side critical sections are
655 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
552 */ 656 */
553void synchronize_rcu(void) 657void synchronize_rcu(void)
554{ 658{
@@ -579,8 +683,7 @@ static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
579 */ 683 */
580static int rcu_preempted_readers_exp(struct rcu_node *rnp) 684static int rcu_preempted_readers_exp(struct rcu_node *rnp)
581{ 685{
582 return !list_empty(&rnp->blocked_tasks[2]) || 686 return rnp->exp_tasks != NULL;
583 !list_empty(&rnp->blocked_tasks[3]);
584} 687}
585 688
586/* 689/*
@@ -615,9 +718,12 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
615 718
616 raw_spin_lock_irqsave(&rnp->lock, flags); 719 raw_spin_lock_irqsave(&rnp->lock, flags);
617 for (;;) { 720 for (;;) {
618 if (!sync_rcu_preempt_exp_done(rnp)) 721 if (!sync_rcu_preempt_exp_done(rnp)) {
722 raw_spin_unlock_irqrestore(&rnp->lock, flags);
619 break; 723 break;
724 }
620 if (rnp->parent == NULL) { 725 if (rnp->parent == NULL) {
726 raw_spin_unlock_irqrestore(&rnp->lock, flags);
621 wake_up(&sync_rcu_preempt_exp_wq); 727 wake_up(&sync_rcu_preempt_exp_wq);
622 break; 728 break;
623 } 729 }
@@ -627,7 +733,6 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
627 raw_spin_lock(&rnp->lock); /* irqs already disabled */ 733 raw_spin_lock(&rnp->lock); /* irqs already disabled */
628 rnp->expmask &= ~mask; 734 rnp->expmask &= ~mask;
629 } 735 }
630 raw_spin_unlock_irqrestore(&rnp->lock, flags);
631} 736}
632 737
633/* 738/*
@@ -640,13 +745,17 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
640static void 745static void
641sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp) 746sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
642{ 747{
643 int must_wait; 748 unsigned long flags;
749 int must_wait = 0;
644 750
645 raw_spin_lock(&rnp->lock); /* irqs already disabled */ 751 raw_spin_lock_irqsave(&rnp->lock, flags);
646 list_splice_init(&rnp->blocked_tasks[0], &rnp->blocked_tasks[2]); 752 if (list_empty(&rnp->blkd_tasks))
647 list_splice_init(&rnp->blocked_tasks[1], &rnp->blocked_tasks[3]); 753 raw_spin_unlock_irqrestore(&rnp->lock, flags);
648 must_wait = rcu_preempted_readers_exp(rnp); 754 else {
649 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ 755 rnp->exp_tasks = rnp->blkd_tasks.next;
756 rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
757 must_wait = 1;
758 }
650 if (!must_wait) 759 if (!must_wait)
651 rcu_report_exp_rnp(rsp, rnp); 760 rcu_report_exp_rnp(rsp, rnp);
652} 761}
@@ -654,9 +763,7 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
654/* 763/*
655 * Wait for an rcu-preempt grace period, but expedite it. The basic idea 764 * Wait for an rcu-preempt grace period, but expedite it. The basic idea
656 * is to invoke synchronize_sched_expedited() to push all the tasks to 765 * is to invoke synchronize_sched_expedited() to push all the tasks to
657 * the ->blocked_tasks[] lists, move all entries from the first set of 766 * the ->blkd_tasks lists and wait for this list to drain.
658 * ->blocked_tasks[] lists to the second set, and finally wait for this
659 * second set to drain.
660 */ 767 */
661void synchronize_rcu_expedited(void) 768void synchronize_rcu_expedited(void)
662{ 769{
@@ -688,7 +795,7 @@ void synchronize_rcu_expedited(void)
688 if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0) 795 if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
689 goto unlock_mb_ret; /* Others did our work for us. */ 796 goto unlock_mb_ret; /* Others did our work for us. */
690 797
691 /* force all RCU readers onto blocked_tasks[]. */ 798 /* force all RCU readers onto ->blkd_tasks lists. */
692 synchronize_sched_expedited(); 799 synchronize_sched_expedited();
693 800
694 raw_spin_lock_irqsave(&rsp->onofflock, flags); 801 raw_spin_lock_irqsave(&rsp->onofflock, flags);
@@ -700,7 +807,7 @@ void synchronize_rcu_expedited(void)
700 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ 807 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
701 } 808 }
702 809
703 /* Snapshot current state of ->blocked_tasks[] lists. */ 810 /* Snapshot current state of ->blkd_tasks lists. */
704 rcu_for_each_leaf_node(rsp, rnp) 811 rcu_for_each_leaf_node(rsp, rnp)
705 sync_rcu_preempt_exp_init(rsp, rnp); 812 sync_rcu_preempt_exp_init(rsp, rnp);
706 if (NUM_RCU_NODES > 1) 813 if (NUM_RCU_NODES > 1)
@@ -708,7 +815,7 @@ void synchronize_rcu_expedited(void)
708 815
709 raw_spin_unlock_irqrestore(&rsp->onofflock, flags); 816 raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
710 817
711 /* Wait for snapshotted ->blocked_tasks[] lists to drain. */ 818 /* Wait for snapshotted ->blkd_tasks lists to drain. */
712 rnp = rcu_get_root(rsp); 819 rnp = rcu_get_root(rsp);
713 wait_event(sync_rcu_preempt_exp_wq, 820 wait_event(sync_rcu_preempt_exp_wq,
714 sync_rcu_preempt_exp_done(rnp)); 821 sync_rcu_preempt_exp_done(rnp));
@@ -724,7 +831,7 @@ mb_ret:
724EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); 831EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
725 832
726/* 833/*
727 * Check to see if there is any immediate preemptable-RCU-related work 834 * Check to see if there is any immediate preemptible-RCU-related work
728 * to be done. 835 * to be done.
729 */ 836 */
730static int rcu_preempt_pending(int cpu) 837static int rcu_preempt_pending(int cpu)
@@ -734,7 +841,7 @@ static int rcu_preempt_pending(int cpu)
734} 841}
735 842
736/* 843/*
737 * Does preemptable RCU need the CPU to stay out of dynticks mode? 844 * Does preemptible RCU need the CPU to stay out of dynticks mode?
738 */ 845 */
739static int rcu_preempt_needs_cpu(int cpu) 846static int rcu_preempt_needs_cpu(int cpu)
740{ 847{
@@ -751,7 +858,7 @@ void rcu_barrier(void)
751EXPORT_SYMBOL_GPL(rcu_barrier); 858EXPORT_SYMBOL_GPL(rcu_barrier);
752 859
753/* 860/*
754 * Initialize preemptable RCU's per-CPU data. 861 * Initialize preemptible RCU's per-CPU data.
755 */ 862 */
756static void __cpuinit rcu_preempt_init_percpu_data(int cpu) 863static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
757{ 864{
@@ -759,23 +866,23 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
759} 866}
760 867
761/* 868/*
762 * Move preemptable RCU's callbacks to ->orphan_cbs_list. 869 * Move preemptible RCU's callbacks from dying CPU to other online CPU.
763 */ 870 */
764static void rcu_preempt_send_cbs_to_orphanage(void) 871static void rcu_preempt_send_cbs_to_online(void)
765{ 872{
766 rcu_send_cbs_to_orphanage(&rcu_preempt_state); 873 rcu_send_cbs_to_online(&rcu_preempt_state);
767} 874}
768 875
769/* 876/*
770 * Initialize preemptable RCU's state structures. 877 * Initialize preemptible RCU's state structures.
771 */ 878 */
772static void __init __rcu_init_preempt(void) 879static void __init __rcu_init_preempt(void)
773{ 880{
774 RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data); 881 rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
775} 882}
776 883
777/* 884/*
778 * Check for a task exiting while in a preemptable-RCU read-side 885 * Check for a task exiting while in a preemptible-RCU read-side
779 * critical section, clean up if so. No need to issue warnings, 886 * critical section, clean up if so. No need to issue warnings,
780 * as debug_check_no_locks_held() already does this if lockdep 887 * as debug_check_no_locks_held() already does this if lockdep
781 * is enabled. 888 * is enabled.
@@ -787,11 +894,13 @@ void exit_rcu(void)
787 if (t->rcu_read_lock_nesting == 0) 894 if (t->rcu_read_lock_nesting == 0)
788 return; 895 return;
789 t->rcu_read_lock_nesting = 1; 896 t->rcu_read_lock_nesting = 1;
790 rcu_read_unlock(); 897 __rcu_read_unlock();
791} 898}
792 899
793#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ 900#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
794 901
902static struct rcu_state *rcu_state = &rcu_sched_state;
903
795/* 904/*
796 * Tell them what RCU they are running. 905 * Tell them what RCU they are running.
797 */ 906 */
@@ -821,7 +930,7 @@ void rcu_force_quiescent_state(void)
821EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); 930EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
822 931
823/* 932/*
824 * Because preemptable RCU does not exist, we never have to check for 933 * Because preemptible RCU does not exist, we never have to check for
825 * CPUs being in quiescent states. 934 * CPUs being in quiescent states.
826 */ 935 */
827static void rcu_preempt_note_context_switch(int cpu) 936static void rcu_preempt_note_context_switch(int cpu)
@@ -829,10 +938,10 @@ static void rcu_preempt_note_context_switch(int cpu)
829} 938}
830 939
831/* 940/*
832 * Because preemptable RCU does not exist, there are never any preempted 941 * Because preemptible RCU does not exist, there are never any preempted
833 * RCU readers. 942 * RCU readers.
834 */ 943 */
835static int rcu_preempted_readers(struct rcu_node *rnp) 944static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
836{ 945{
837 return 0; 946 return 0;
838} 947}
@@ -847,10 +956,8 @@ static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
847 956
848#endif /* #ifdef CONFIG_HOTPLUG_CPU */ 957#endif /* #ifdef CONFIG_HOTPLUG_CPU */
849 958
850#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
851
852/* 959/*
853 * Because preemptable RCU does not exist, we never have to check for 960 * Because preemptible RCU does not exist, we never have to check for
854 * tasks blocked within RCU read-side critical sections. 961 * tasks blocked within RCU read-side critical sections.
855 */ 962 */
856static void rcu_print_detail_task_stall(struct rcu_state *rsp) 963static void rcu_print_detail_task_stall(struct rcu_state *rsp)
@@ -858,17 +965,23 @@ static void rcu_print_detail_task_stall(struct rcu_state *rsp)
858} 965}
859 966
860/* 967/*
861 * Because preemptable RCU does not exist, we never have to check for 968 * Because preemptible RCU does not exist, we never have to check for
862 * tasks blocked within RCU read-side critical sections. 969 * tasks blocked within RCU read-side critical sections.
863 */ 970 */
864static void rcu_print_task_stall(struct rcu_node *rnp) 971static void rcu_print_task_stall(struct rcu_node *rnp)
865{ 972{
866} 973}
867 974
868#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ 975/*
976 * Because preemptible RCU does not exist, there is no need to suppress
977 * its CPU stall warnings.
978 */
979static void rcu_preempt_stall_reset(void)
980{
981}
869 982
870/* 983/*
871 * Because there is no preemptable RCU, there can be no readers blocked, 984 * Because there is no preemptible RCU, there can be no readers blocked,
872 * so there is no need to check for blocked tasks. So check only for 985 * so there is no need to check for blocked tasks. So check only for
873 * bogus qsmask values. 986 * bogus qsmask values.
874 */ 987 */
@@ -880,7 +993,7 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
880#ifdef CONFIG_HOTPLUG_CPU 993#ifdef CONFIG_HOTPLUG_CPU
881 994
882/* 995/*
883 * Because preemptable RCU does not exist, it never needs to migrate 996 * Because preemptible RCU does not exist, it never needs to migrate
884 * tasks that were blocked within RCU read-side critical sections, and 997 * tasks that were blocked within RCU read-side critical sections, and
885 * such non-existent tasks cannot possibly have been blocking the current 998 * such non-existent tasks cannot possibly have been blocking the current
886 * grace period. 999 * grace period.
@@ -893,7 +1006,7 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
893} 1006}
894 1007
895/* 1008/*
896 * Because preemptable RCU does not exist, it never needs CPU-offline 1009 * Because preemptible RCU does not exist, it never needs CPU-offline
897 * processing. 1010 * processing.
898 */ 1011 */
899static void rcu_preempt_offline_cpu(int cpu) 1012static void rcu_preempt_offline_cpu(int cpu)
@@ -903,7 +1016,7 @@ static void rcu_preempt_offline_cpu(int cpu)
903#endif /* #ifdef CONFIG_HOTPLUG_CPU */ 1016#endif /* #ifdef CONFIG_HOTPLUG_CPU */
904 1017
905/* 1018/*
906 * Because preemptable RCU does not exist, it never has any callbacks 1019 * Because preemptible RCU does not exist, it never has any callbacks
907 * to check. 1020 * to check.
908 */ 1021 */
909static void rcu_preempt_check_callbacks(int cpu) 1022static void rcu_preempt_check_callbacks(int cpu)
@@ -911,7 +1024,7 @@ static void rcu_preempt_check_callbacks(int cpu)
911} 1024}
912 1025
913/* 1026/*
914 * Because preemptable RCU does not exist, it never has any callbacks 1027 * Because preemptible RCU does not exist, it never has any callbacks
915 * to process. 1028 * to process.
916 */ 1029 */
917static void rcu_preempt_process_callbacks(void) 1030static void rcu_preempt_process_callbacks(void)
@@ -919,17 +1032,8 @@ static void rcu_preempt_process_callbacks(void)
919} 1032}
920 1033
921/* 1034/*
922 * In classic RCU, call_rcu() is just call_rcu_sched().
923 */
924void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
925{
926 call_rcu_sched(head, func);
927}
928EXPORT_SYMBOL_GPL(call_rcu);
929
930/*
931 * Wait for an rcu-preempt grace period, but make it happen quickly. 1035 * Wait for an rcu-preempt grace period, but make it happen quickly.
932 * But because preemptable RCU does not exist, map to rcu-sched. 1036 * But because preemptible RCU does not exist, map to rcu-sched.
933 */ 1037 */
934void synchronize_rcu_expedited(void) 1038void synchronize_rcu_expedited(void)
935{ 1039{
@@ -940,7 +1044,7 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
940#ifdef CONFIG_HOTPLUG_CPU 1044#ifdef CONFIG_HOTPLUG_CPU
941 1045
942/* 1046/*
943 * Because preemptable RCU does not exist, there is never any need to 1047 * Because preemptible RCU does not exist, there is never any need to
944 * report on tasks preempted in RCU read-side critical sections during 1048 * report on tasks preempted in RCU read-side critical sections during
945 * expedited RCU grace periods. 1049 * expedited RCU grace periods.
946 */ 1050 */
@@ -952,7 +1056,7 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
952#endif /* #ifdef CONFIG_HOTPLUG_CPU */ 1056#endif /* #ifdef CONFIG_HOTPLUG_CPU */
953 1057
954/* 1058/*
955 * Because preemptable RCU does not exist, it never has any work to do. 1059 * Because preemptible RCU does not exist, it never has any work to do.
956 */ 1060 */
957static int rcu_preempt_pending(int cpu) 1061static int rcu_preempt_pending(int cpu)
958{ 1062{
@@ -960,7 +1064,7 @@ static int rcu_preempt_pending(int cpu)
960} 1064}
961 1065
962/* 1066/*
963 * Because preemptable RCU does not exist, it never needs any CPU. 1067 * Because preemptible RCU does not exist, it never needs any CPU.
964 */ 1068 */
965static int rcu_preempt_needs_cpu(int cpu) 1069static int rcu_preempt_needs_cpu(int cpu)
966{ 1070{
@@ -968,7 +1072,7 @@ static int rcu_preempt_needs_cpu(int cpu)
968} 1072}
969 1073
970/* 1074/*
971 * Because preemptable RCU does not exist, rcu_barrier() is just 1075 * Because preemptible RCU does not exist, rcu_barrier() is just
972 * another name for rcu_barrier_sched(). 1076 * another name for rcu_barrier_sched().
973 */ 1077 */
974void rcu_barrier(void) 1078void rcu_barrier(void)
@@ -978,7 +1082,7 @@ void rcu_barrier(void)
978EXPORT_SYMBOL_GPL(rcu_barrier); 1082EXPORT_SYMBOL_GPL(rcu_barrier);
979 1083
980/* 1084/*
981 * Because preemptable RCU does not exist, there is no per-CPU 1085 * Because preemptible RCU does not exist, there is no per-CPU
982 * data to initialize. 1086 * data to initialize.
983 */ 1087 */
984static void __cpuinit rcu_preempt_init_percpu_data(int cpu) 1088static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
@@ -986,14 +1090,14 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
986} 1090}
987 1091
988/* 1092/*
989 * Because there is no preemptable RCU, there are no callbacks to move. 1093 * Because there is no preemptible RCU, there are no callbacks to move.
990 */ 1094 */
991static void rcu_preempt_send_cbs_to_orphanage(void) 1095static void rcu_preempt_send_cbs_to_online(void)
992{ 1096{
993} 1097}
994 1098
995/* 1099/*
996 * Because preemptable RCU does not exist, it need not be initialized. 1100 * Because preemptible RCU does not exist, it need not be initialized.
997 */ 1101 */
998static void __init __rcu_init_preempt(void) 1102static void __init __rcu_init_preempt(void)
999{ 1103{
@@ -1001,6 +1105,791 @@ static void __init __rcu_init_preempt(void)
1001 1105
1002#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ 1106#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
1003 1107
1108#ifdef CONFIG_RCU_BOOST
1109
1110#include "rtmutex_common.h"
1111
1112#ifdef CONFIG_RCU_TRACE
1113
1114static void rcu_initiate_boost_trace(struct rcu_node *rnp)
1115{
1116 if (list_empty(&rnp->blkd_tasks))
1117 rnp->n_balk_blkd_tasks++;
1118 else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
1119 rnp->n_balk_exp_gp_tasks++;
1120 else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
1121 rnp->n_balk_boost_tasks++;
1122 else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
1123 rnp->n_balk_notblocked++;
1124 else if (rnp->gp_tasks != NULL &&
1125 ULONG_CMP_LT(jiffies, rnp->boost_time))
1126 rnp->n_balk_notyet++;
1127 else
1128 rnp->n_balk_nos++;
1129}
1130
1131#else /* #ifdef CONFIG_RCU_TRACE */
1132
1133static void rcu_initiate_boost_trace(struct rcu_node *rnp)
1134{
1135}
1136
1137#endif /* #else #ifdef CONFIG_RCU_TRACE */
1138
1139/*
1140 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
1141 * or ->boost_tasks, advancing the pointer to the next task in the
1142 * ->blkd_tasks list.
1143 *
1144 * Note that irqs must be enabled: boosting the task can block.
1145 * Returns 1 if there are more tasks needing to be boosted.
1146 */
1147static int rcu_boost(struct rcu_node *rnp)
1148{
1149 unsigned long flags;
1150 struct rt_mutex mtx;
1151 struct task_struct *t;
1152 struct list_head *tb;
1153
1154 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
1155 return 0; /* Nothing left to boost. */
1156
1157 raw_spin_lock_irqsave(&rnp->lock, flags);
1158
1159 /*
1160 * Recheck under the lock: all tasks in need of boosting
1161 * might exit their RCU read-side critical sections on their own.
1162 */
1163 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
1164 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1165 return 0;
1166 }
1167
1168 /*
1169 * Preferentially boost tasks blocking expedited grace periods.
1170 * This cannot starve the normal grace periods because a second
1171 * expedited grace period must boost all blocked tasks, including
1172 * those blocking the pre-existing normal grace period.
1173 */
1174 if (rnp->exp_tasks != NULL) {
1175 tb = rnp->exp_tasks;
1176 rnp->n_exp_boosts++;
1177 } else {
1178 tb = rnp->boost_tasks;
1179 rnp->n_normal_boosts++;
1180 }
1181 rnp->n_tasks_boosted++;
1182
1183 /*
1184 * We boost task t by manufacturing an rt_mutex that appears to
1185 * be held by task t. We leave a pointer to that rt_mutex where
1186 * task t can find it, and task t will release the mutex when it
1187 * exits its outermost RCU read-side critical section. Then
1188 * simply acquiring this artificial rt_mutex will boost task
1189 * t's priority. (Thanks to tglx for suggesting this approach!)
1190 *
1191 * Note that task t must acquire rnp->lock to remove itself from
1192 * the ->blkd_tasks list, which it will do from exit() if from
1193 * nowhere else. We therefore are guaranteed that task t will
1194 * stay around at least until we drop rnp->lock. Note that
1195 * rnp->lock also resolves races between our priority boosting
1196 * and task t's exiting its outermost RCU read-side critical
1197 * section.
1198 */
1199 t = container_of(tb, struct task_struct, rcu_node_entry);
1200 rt_mutex_init_proxy_locked(&mtx, t);
1201 t->rcu_boost_mutex = &mtx;
1202 t->rcu_boosted = 1;
1203 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1204 rt_mutex_lock(&mtx); /* Side effect: boosts task t's priority. */
1205 rt_mutex_unlock(&mtx); /* Keep lockdep happy. */
1206
1207 return rnp->exp_tasks != NULL || rnp->boost_tasks != NULL;
1208}
1209
1210/*
1211 * Timer handler to initiate waking up of boost kthreads that
1212 * have yielded the CPU due to excessive numbers of tasks to
1213 * boost. We wake up the per-rcu_node kthread, which in turn
1214 * will wake up the booster kthread.
1215 */
1216static void rcu_boost_kthread_timer(unsigned long arg)
1217{
1218 invoke_rcu_node_kthread((struct rcu_node *)arg);
1219}
1220
1221/*
1222 * Priority-boosting kthread. One per leaf rcu_node and one for the
1223 * root rcu_node.
1224 */
1225static int rcu_boost_kthread(void *arg)
1226{
1227 struct rcu_node *rnp = (struct rcu_node *)arg;
1228 int spincnt = 0;
1229 int more2boost;
1230
1231 for (;;) {
1232 rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
1233 rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
1234 rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
1235 more2boost = rcu_boost(rnp);
1236 if (more2boost)
1237 spincnt++;
1238 else
1239 spincnt = 0;
1240 if (spincnt > 10) {
1241 rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp);
1242 spincnt = 0;
1243 }
1244 }
1245 /* NOTREACHED */
1246 return 0;
1247}
1248
1249/*
1250 * Check to see if it is time to start boosting RCU readers that are
1251 * blocking the current grace period, and, if so, tell the per-rcu_node
1252 * kthread to start boosting them. If there is an expedited grace
1253 * period in progress, it is always time to boost.
1254 *
1255 * The caller must hold rnp->lock, which this function releases,
1256 * but irqs remain disabled. The ->boost_kthread_task is immortal,
1257 * so we don't need to worry about it going away.
1258 */
1259static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1260{
1261 struct task_struct *t;
1262
1263 if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
1264 rnp->n_balk_exp_gp_tasks++;
1265 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1266 return;
1267 }
1268 if (rnp->exp_tasks != NULL ||
1269 (rnp->gp_tasks != NULL &&
1270 rnp->boost_tasks == NULL &&
1271 rnp->qsmask == 0 &&
1272 ULONG_CMP_GE(jiffies, rnp->boost_time))) {
1273 if (rnp->exp_tasks == NULL)
1274 rnp->boost_tasks = rnp->gp_tasks;
1275 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1276 t = rnp->boost_kthread_task;
1277 if (t != NULL)
1278 wake_up_process(t);
1279 } else {
1280 rcu_initiate_boost_trace(rnp);
1281 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1282 }
1283}
1284
1285/*
1286 * Wake up the per-CPU kthread to invoke RCU callbacks.
1287 */
1288static void invoke_rcu_callbacks_kthread(void)
1289{
1290 unsigned long flags;
1291
1292 local_irq_save(flags);
1293 __this_cpu_write(rcu_cpu_has_work, 1);
1294 if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
1295 local_irq_restore(flags);
1296 return;
1297 }
1298 wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
1299 local_irq_restore(flags);
1300}
1301
1302/*
1303 * Set the affinity of the boost kthread. The CPU-hotplug locks are
1304 * held, so no one should be messing with the existence of the boost
1305 * kthread.
1306 */
1307static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
1308 cpumask_var_t cm)
1309{
1310 struct task_struct *t;
1311
1312 t = rnp->boost_kthread_task;
1313 if (t != NULL)
1314 set_cpus_allowed_ptr(rnp->boost_kthread_task, cm);
1315}
1316
1317#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
1318
1319/*
1320 * Do priority-boost accounting for the start of a new grace period.
1321 */
1322static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1323{
1324 rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
1325}
1326
1327/*
1328 * Create an RCU-boost kthread for the specified node if one does not
1329 * already exist. We only create this kthread for preemptible RCU.
1330 * Returns zero if all is well, a negated errno otherwise.
1331 */
1332static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
1333 struct rcu_node *rnp,
1334 int rnp_index)
1335{
1336 unsigned long flags;
1337 struct sched_param sp;
1338 struct task_struct *t;
1339
1340 if (&rcu_preempt_state != rsp)
1341 return 0;
1342 rsp->boost = 1;
1343 if (rnp->boost_kthread_task != NULL)
1344 return 0;
1345 t = kthread_create(rcu_boost_kthread, (void *)rnp,
1346 "rcub%d", rnp_index);
1347 if (IS_ERR(t))
1348 return PTR_ERR(t);
1349 raw_spin_lock_irqsave(&rnp->lock, flags);
1350 rnp->boost_kthread_task = t;
1351 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1352 sp.sched_priority = RCU_KTHREAD_PRIO;
1353 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1354 wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1355 return 0;
1356}
1357
1358#ifdef CONFIG_HOTPLUG_CPU
1359
1360/*
1361 * Stop the RCU's per-CPU kthread when its CPU goes offline.
1362 */
1363static void rcu_stop_cpu_kthread(int cpu)
1364{
1365 struct task_struct *t;
1366
1367 /* Stop the CPU's kthread. */
1368 t = per_cpu(rcu_cpu_kthread_task, cpu);
1369 if (t != NULL) {
1370 per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
1371 kthread_stop(t);
1372 }
1373}
1374
1375#endif /* #ifdef CONFIG_HOTPLUG_CPU */
1376
1377static void rcu_kthread_do_work(void)
1378{
1379 rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
1380 rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
1381 rcu_preempt_do_callbacks();
1382}
1383
1384/*
1385 * Wake up the specified per-rcu_node-structure kthread.
1386 * Because the per-rcu_node kthreads are immortal, we don't need
1387 * to do anything to keep them alive.
1388 */
1389static void invoke_rcu_node_kthread(struct rcu_node *rnp)
1390{
1391 struct task_struct *t;
1392
1393 t = rnp->node_kthread_task;
1394 if (t != NULL)
1395 wake_up_process(t);
1396}
1397
1398/*
1399 * Set the specified CPU's kthread to run RT or not, as specified by
1400 * the to_rt argument. The CPU-hotplug locks are held, so the task
1401 * is not going away.
1402 */
1403static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
1404{
1405 int policy;
1406 struct sched_param sp;
1407 struct task_struct *t;
1408
1409 t = per_cpu(rcu_cpu_kthread_task, cpu);
1410 if (t == NULL)
1411 return;
1412 if (to_rt) {
1413 policy = SCHED_FIFO;
1414 sp.sched_priority = RCU_KTHREAD_PRIO;
1415 } else {
1416 policy = SCHED_NORMAL;
1417 sp.sched_priority = 0;
1418 }
1419 sched_setscheduler_nocheck(t, policy, &sp);
1420}
1421
1422/*
1423 * Timer handler to initiate the waking up of per-CPU kthreads that
1424 * have yielded the CPU due to excess numbers of RCU callbacks.
1425 * We wake up the per-rcu_node kthread, which in turn will wake up
1426 * the booster kthread.
1427 */
1428static void rcu_cpu_kthread_timer(unsigned long arg)
1429{
1430 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
1431 struct rcu_node *rnp = rdp->mynode;
1432
1433 atomic_or(rdp->grpmask, &rnp->wakemask);
1434 invoke_rcu_node_kthread(rnp);
1435}
1436
1437/*
1438 * Drop to non-real-time priority and yield, but only after posting a
1439 * timer that will cause us to regain our real-time priority if we
1440 * remain preempted. Either way, we restore our real-time priority
1441 * before returning.
1442 */
1443static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
1444{
1445 struct sched_param sp;
1446 struct timer_list yield_timer;
1447
1448 setup_timer_on_stack(&yield_timer, f, arg);
1449 mod_timer(&yield_timer, jiffies + 2);
1450 sp.sched_priority = 0;
1451 sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
1452 set_user_nice(current, 19);
1453 schedule();
1454 sp.sched_priority = RCU_KTHREAD_PRIO;
1455 sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1456 del_timer(&yield_timer);
1457}
1458
1459/*
1460 * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
1461 * This can happen while the corresponding CPU is either coming online
1462 * or going offline. We cannot wait until the CPU is fully online
1463 * before starting the kthread, because the various notifier functions
1464 * can wait for RCU grace periods. So we park rcu_cpu_kthread() until
1465 * the corresponding CPU is online.
1466 *
1467 * Return 1 if the kthread needs to stop, 0 otherwise.
1468 *
1469 * Caller must disable bh. This function can momentarily enable it.
1470 */
1471static int rcu_cpu_kthread_should_stop(int cpu)
1472{
1473 while (cpu_is_offline(cpu) ||
1474 !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
1475 smp_processor_id() != cpu) {
1476 if (kthread_should_stop())
1477 return 1;
1478 per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
1479 per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
1480 local_bh_enable();
1481 schedule_timeout_uninterruptible(1);
1482 if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
1483 set_cpus_allowed_ptr(current, cpumask_of(cpu));
1484 local_bh_disable();
1485 }
1486 per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
1487 return 0;
1488}
1489
1490/*
1491 * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
1492 * earlier RCU softirq.
1493 */
1494static int rcu_cpu_kthread(void *arg)
1495{
1496 int cpu = (int)(long)arg;
1497 unsigned long flags;
1498 int spincnt = 0;
1499 unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
1500 char work;
1501 char *workp = &per_cpu(rcu_cpu_has_work, cpu);
1502
1503 for (;;) {
1504 *statusp = RCU_KTHREAD_WAITING;
1505 rcu_wait(*workp != 0 || kthread_should_stop());
1506 local_bh_disable();
1507 if (rcu_cpu_kthread_should_stop(cpu)) {
1508 local_bh_enable();
1509 break;
1510 }
1511 *statusp = RCU_KTHREAD_RUNNING;
1512 per_cpu(rcu_cpu_kthread_loops, cpu)++;
1513 local_irq_save(flags);
1514 work = *workp;
1515 *workp = 0;
1516 local_irq_restore(flags);
1517 if (work)
1518 rcu_kthread_do_work();
1519 local_bh_enable();
1520 if (*workp != 0)
1521 spincnt++;
1522 else
1523 spincnt = 0;
1524 if (spincnt > 10) {
1525 *statusp = RCU_KTHREAD_YIELDING;
1526 rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
1527 spincnt = 0;
1528 }
1529 }
1530 *statusp = RCU_KTHREAD_STOPPED;
1531 return 0;
1532}
1533
1534/*
1535 * Spawn a per-CPU kthread, setting up affinity and priority.
1536 * Because the CPU hotplug lock is held, no other CPU will be attempting
1537 * to manipulate rcu_cpu_kthread_task. There might be another CPU
1538 * attempting to access it during boot, but the locking in kthread_bind()
1539 * will enforce sufficient ordering.
1540 *
1541 * Please note that we cannot simply refuse to wake up the per-CPU
1542 * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
1543 * which can result in softlockup complaints if the task ends up being
1544 * idle for more than a couple of minutes.
1545 *
1546 * However, please note also that we cannot bind the per-CPU kthread to its
1547 * CPU until that CPU is fully online. We also cannot wait until the
1548 * CPU is fully online before we create its per-CPU kthread, as this would
1549 * deadlock the system when CPU notifiers tried waiting for grace
1550 * periods. So we bind the per-CPU kthread to its CPU only if the CPU
1551 * is online. If its CPU is not yet fully online, then the code in
1552 * rcu_cpu_kthread() will wait until it is fully online, and then do
1553 * the binding.
1554 */
1555static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
1556{
1557 struct sched_param sp;
1558 struct task_struct *t;
1559
1560 if (!rcu_scheduler_fully_active ||
1561 per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
1562 return 0;
1563 t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
1564 if (IS_ERR(t))
1565 return PTR_ERR(t);
1566 if (cpu_online(cpu))
1567 kthread_bind(t, cpu);
1568 per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
1569 WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
1570 sp.sched_priority = RCU_KTHREAD_PRIO;
1571 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1572 per_cpu(rcu_cpu_kthread_task, cpu) = t;
1573 wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
1574 return 0;
1575}
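
The spawn-bind-prioritize sequence above (kthread_create(), kthread_bind(), sched_setscheduler_nocheck()) maps onto pthread_create(), pthread_setaffinity_np() and pthread_setschedparam() in user space. A minimal sketch under that assumption follows; SCHED_FIFO needs CAP_SYS_NICE or root, so the priority step is best-effort.

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>

/*
 * User-space analogue of the spawn path above: create a thread, pin it
 * to one CPU, and give it a real-time FIFO priority.
 */
static int spawn_pinned_fifo_thread(pthread_t *tid, void *(*fn)(void *),
				    int cpu, int prio)
{
	cpu_set_t mask;
	struct sched_param sp = { .sched_priority = prio };
	int ret;

	ret = pthread_create(tid, NULL, fn, (void *)(long)cpu);
	if (ret != 0)
		return ret;
	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
	(void)pthread_setaffinity_np(*tid, sizeof(mask), &mask);
	return pthread_setschedparam(*tid, SCHED_FIFO, &sp);
}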
1576
1577/*
1578 * Per-rcu_node kthread, which is in charge of waking up the per-CPU
1579 * kthreads when needed. We ignore requests to wake up kthreads
1580 * for offline CPUs, which is OK because force_quiescent_state()
1581 * takes care of this case.
1582 */
1583static int rcu_node_kthread(void *arg)
1584{
1585 int cpu;
1586 unsigned long flags;
1587 unsigned long mask;
1588 struct rcu_node *rnp = (struct rcu_node *)arg;
1589 struct sched_param sp;
1590 struct task_struct *t;
1591
1592 for (;;) {
1593 rnp->node_kthread_status = RCU_KTHREAD_WAITING;
1594 rcu_wait(atomic_read(&rnp->wakemask) != 0);
1595 rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
1596 raw_spin_lock_irqsave(&rnp->lock, flags);
1597 mask = atomic_xchg(&rnp->wakemask, 0);
1598 rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
1599 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
1600 if ((mask & 0x1) == 0)
1601 continue;
1602 preempt_disable();
1603 t = per_cpu(rcu_cpu_kthread_task, cpu);
1604 if (!cpu_online(cpu) || t == NULL) {
1605 preempt_enable();
1606 continue;
1607 }
1608 per_cpu(rcu_cpu_has_work, cpu) = 1;
1609 sp.sched_priority = RCU_KTHREAD_PRIO;
1610 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1611 preempt_enable();
1612 }
1613 }
1614 /* NOTREACHED */
1615 rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
1616 return 0;
1617}
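
The per-CPU scan in the loop above follows a simple "shift right, test bit 0" idiom. For readers skimming the patch, a standalone sketch of just that idiom (with a hypothetical has_work[] array standing in for per_cpu(rcu_cpu_has_work)) looks like this:

#include <stdint.h>

/*
 * Walk a bitmask of CPUs that need attention, marking each one's worker,
 * in the same style as the loop in rcu_node_kthread() above.
 */
static void mark_cpus_with_work(uint64_t wakemask, int grplo, int grphi,
				int *has_work)
{
	int cpu;

	for (cpu = grplo; cpu <= grphi; cpu++, wakemask >>= 1) {
		if ((wakemask & 0x1) == 0)
			continue;
		has_work[cpu] = 1;	/* that CPU's worker will pick this up */
	}
}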
1618
1619/*
1620 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
1621 * served by the rcu_node in question. The CPU hotplug lock is still
1622 * held, so the value of rnp->qsmaskinit will be stable.
1623 *
1624 * We don't include outgoingcpu in the affinity set; use -1 if there is
1625 * no outgoing CPU. If there are no CPUs left in the affinity set,
1626 * this function allows the kthread to execute on any CPU outside the group.
1627 */
1628static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1629{
1630 cpumask_var_t cm;
1631 int cpu;
1632 unsigned long mask = rnp->qsmaskinit;
1633
1634 if (rnp->node_kthread_task == NULL)
1635 return;
1636 if (!alloc_cpumask_var(&cm, GFP_KERNEL))
1637 return;
1638 cpumask_clear(cm);
1639 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
1640 if ((mask & 0x1) && cpu != outgoingcpu)
1641 cpumask_set_cpu(cpu, cm);
1642 if (cpumask_weight(cm) == 0) {
1643 cpumask_setall(cm);
1644 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
1645 cpumask_clear_cpu(cpu, cm);
1646 WARN_ON_ONCE(cpumask_weight(cm) == 0);
1647 }
1648 set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
1649 rcu_boost_kthread_setaffinity(rnp, cm);
1650 free_cpumask_var(cm);
1651}
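
As an editorial sketch only, the same affinity computation can be expressed with the glibc cpu_set_t API; CPU_SETSIZE plays the role of cpumask_setall(), and the function and parameter names are invented for illustration.

#define _GNU_SOURCE
#include <sched.h>

/*
 * Include every CPU in [grplo, grphi] whose bit is set in mask, except
 * outgoingcpu (-1 means no CPU is leaving).  If nothing is left, fall
 * back to "anywhere except this group's CPUs" rather than an empty mask,
 * mirroring rcu_node_kthread_setaffinity() above.
 */
static void build_node_affinity(cpu_set_t *cm, unsigned long mask,
				int grplo, int grphi, int outgoingcpu)
{
	int cpu;

	CPU_ZERO(cm);
	for (cpu = grplo; cpu <= grphi; cpu++, mask >>= 1)
		if ((mask & 0x1) && cpu != outgoingcpu)
			CPU_SET(cpu, cm);
	if (CPU_COUNT(cm) == 0) {
		for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
			CPU_SET(cpu, cm);
		for (cpu = grplo; cpu <= grphi; cpu++)
			CPU_CLR(cpu, cm);
	}
}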
1652
1653/*
1654 * Spawn a per-rcu_node kthread, setting priority and affinity.
1655 * Called during boot before online/offline can happen, or, if
1656 * during runtime, with the main CPU-hotplug locks held. So only
1657 * one of these can be executing at a time.
1658 */
1659static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
1660 struct rcu_node *rnp)
1661{
1662 unsigned long flags;
1663 int rnp_index = rnp - &rsp->node[0];
1664 struct sched_param sp;
1665 struct task_struct *t;
1666
1667 if (!rcu_scheduler_fully_active ||
1668 rnp->qsmaskinit == 0)
1669 return 0;
1670 if (rnp->node_kthread_task == NULL) {
1671 t = kthread_create(rcu_node_kthread, (void *)rnp,
1672 "rcun%d", rnp_index);
1673 if (IS_ERR(t))
1674 return PTR_ERR(t);
1675 raw_spin_lock_irqsave(&rnp->lock, flags);
1676 rnp->node_kthread_task = t;
1677 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1678 sp.sched_priority = 99;
1679 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1680 wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1681 }
1682 return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
1683}
1684
1685/*
1686 * Spawn all kthreads -- called as soon as the scheduler is running.
1687 */
1688static int __init rcu_spawn_kthreads(void)
1689{
1690 int cpu;
1691 struct rcu_node *rnp;
1692
1693 rcu_scheduler_fully_active = 1;
1694 for_each_possible_cpu(cpu) {
1695 per_cpu(rcu_cpu_has_work, cpu) = 0;
1696 if (cpu_online(cpu))
1697 (void)rcu_spawn_one_cpu_kthread(cpu);
1698 }
1699 rnp = rcu_get_root(rcu_state);
1700 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1701 if (NUM_RCU_NODES > 1) {
1702 rcu_for_each_leaf_node(rcu_state, rnp)
1703 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1704 }
1705 return 0;
1706}
1707early_initcall(rcu_spawn_kthreads);
1708
1709static void __cpuinit rcu_prepare_kthreads(int cpu)
1710{
1711 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
1712 struct rcu_node *rnp = rdp->mynode;
1713
1714 /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
1715 if (rcu_scheduler_fully_active) {
1716 (void)rcu_spawn_one_cpu_kthread(cpu);
1717 if (rnp->node_kthread_task == NULL)
1718 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1719 }
1720}
1721
1722#else /* #ifdef CONFIG_RCU_BOOST */
1723
1724static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1725{
1726 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1727}
1728
1729static void invoke_rcu_callbacks_kthread(void)
1730{
1731 WARN_ON_ONCE(1);
1732}
1733
1734static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1735{
1736}
1737
1738#ifdef CONFIG_HOTPLUG_CPU
1739
1740static void rcu_stop_cpu_kthread(int cpu)
1741{
1742}
1743
1744#endif /* #ifdef CONFIG_HOTPLUG_CPU */
1745
1746static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1747{
1748}
1749
1750static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
1751{
1752}
1753
1754static int __init rcu_scheduler_really_started(void)
1755{
1756 rcu_scheduler_fully_active = 1;
1757 return 0;
1758}
1759early_initcall(rcu_scheduler_really_started);
1760
1761static void __cpuinit rcu_prepare_kthreads(int cpu)
1762{
1763}
1764
1765#endif /* #else #ifdef CONFIG_RCU_BOOST */
1766
1767#ifndef CONFIG_SMP
1768
1769void synchronize_sched_expedited(void)
1770{
1771 cond_resched();
1772}
1773EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
1774
1775#else /* #ifndef CONFIG_SMP */
1776
1777static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
1778static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
1779
1780static int synchronize_sched_expedited_cpu_stop(void *data)
1781{
1782 /*
1783 * There must be a full memory barrier on each affected CPU
1784 * between the time that try_stop_cpus() is called and the
1785 * time that it returns.
1786 *
1787 * In the current initial implementation of cpu_stop, the
1788 * above condition is already met when the control reaches
1789 * this point and the following smp_mb() is not strictly
1790 * necessary. Do smp_mb() anyway for documentation and
1791 * robustness against future implementation changes.
1792 */
1793 smp_mb(); /* See above comment block. */
1794 return 0;
1795}
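
For reference (not part of the patch), the defensive full barrier described in the comment above has a rough user-space counterpart in C11's sequentially consistent fence:

#include <stdatomic.h>

/* Approximate user-space counterpart of smp_mb(): a full, sequentially
 * consistent memory fence. */
static inline void full_memory_fence(void)
{
	atomic_thread_fence(memory_order_seq_cst);
}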
1796
1797/*
1798 * Wait for an rcu-sched grace period to elapse, but use a "big hammer"
1799 * approach to force the grace period to end quickly. This consumes
1800 * significant time on all CPUs, and is thus not recommended for
1801 * any sort of common-case code.
1802 *
1803 * Note that it is illegal to call this function while holding any
1804 * lock that is acquired by a CPU-hotplug notifier. Failing to
1805 * observe this restriction will result in deadlock.
1806 *
1807 * This implementation can be thought of as an application of ticket
1808 * locking to RCU, with sync_sched_expedited_started and
1809 * sync_sched_expedited_done taking on the roles of the halves
1810 * of the ticket-lock word. Each task atomically increments
1811 * sync_sched_expedited_started upon entry, snapshotting the old value,
1812 * then attempts to stop all the CPUs. If this succeeds, then each
1813 * CPU will have executed a context switch, resulting in an RCU-sched
1814 * grace period. We are then done, so we use atomic_cmpxchg() to
1815 * update sync_sched_expedited_done to match our snapshot -- but
1816 * only if someone else has not already advanced past our snapshot.
1817 *
1818 * On the other hand, if try_stop_cpus() fails, we check the value
1819 * of sync_sched_expedited_done. If it has advanced past our
1820 * initial snapshot, then someone else must have forced a grace period
1821 * some time after we took our snapshot. In this case, our work is
1822 * done for us, and we can simply return. Otherwise, we try again,
1823 * but keep our initial snapshot for purposes of checking for someone
1824 * doing our work for us.
1825 *
1826 * If we fail too many times in a row, we fall back to synchronize_sched().
1827 */
1828void synchronize_sched_expedited(void)
1829{
1830 int firstsnap, s, snap, trycount = 0;
1831
1832 /* Note that atomic_inc_return() implies a full memory barrier. */
1833 firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
1834 get_online_cpus();
1835
1836 /*
1837 * Each pass through the following loop attempts to force a
1838 * context switch on each CPU.
1839 */
1840 while (try_stop_cpus(cpu_online_mask,
1841 synchronize_sched_expedited_cpu_stop,
1842 NULL) == -EAGAIN) {
1843 put_online_cpus();
1844
1845 /* No joy, try again later. Or just synchronize_sched(). */
1846 if (trycount++ < 10)
1847 udelay(trycount * num_online_cpus());
1848 else {
1849 synchronize_sched();
1850 return;
1851 }
1852
1853 /* Check to see if someone else did our work for us. */
1854 s = atomic_read(&sync_sched_expedited_done);
1855 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
1856 smp_mb(); /* ensure test happens before caller kfree */
1857 return;
1858 }
1859
1860 /*
1861 * Refetching sync_sched_expedited_started allows later
1862 * callers to piggyback on our grace period. We subtract
1863 * 1 to get the same token that the last incrementer got.
1864 * We retry after they started, so our grace period works
1865 * for them, and they started after our first try, so their
1866 * grace period works for us.
1867 */
1868 get_online_cpus();
1869 snap = atomic_read(&sync_sched_expedited_started) - 1;
1870 smp_mb(); /* ensure read is before try_stop_cpus(). */
1871 }
1872
1873 /*
1874 * Everyone up to our most recent fetch is covered by our grace
1875 * period. Update the counter, but only if our work is still
1876 * relevant -- which it won't be if someone who started later
1877 * than we did beat us to the punch.
1878 */
1879 do {
1880 s = atomic_read(&sync_sched_expedited_done);
1881 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
1882 smp_mb(); /* ensure test happens before caller kfree */
1883 break;
1884 }
1885 } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
1886
1887 put_online_cpus();
1888}
1889EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
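
To make the ticket-lock analogy in the comment block above concrete, here is a small editorial sketch of the started/done counter logic in C11 atomics. The force_all_context_switches() and slow_path_synchronize() stubs are hypothetical stand-ins for try_stop_cpus() and synchronize_sched(), included only so the sketch is self-contained; the wrap-safe comparison mirrors UINT_CMP_GE().

#include <stdatomic.h>

static atomic_int expedited_started;
static atomic_int expedited_done;

/* Hypothetical stubs standing in for try_stop_cpus() and
 * synchronize_sched(). */
static int force_all_context_switches(void) { return 0; }
static void slow_path_synchronize(void) { }

static void expedited_wait(void)
{
	int firstsnap, snap, s, trycount = 0;

	/* Take a ticket; atomic_fetch_add() + 1 mimics atomic_inc_return(). */
	firstsnap = snap = atomic_fetch_add(&expedited_started, 1) + 1;
	while (force_all_context_switches() != 0) {
		/* Did a later caller's grace period already cover us? */
		s = atomic_load(&expedited_done);
		if ((int)((unsigned)s - (unsigned)firstsnap) >= 0)
			return;
		if (++trycount > 10) {
			slow_path_synchronize();	/* give up, take the slow path */
			return;
		}
		/* Refresh the snapshot so later arrivals can piggyback on us. */
		snap = atomic_load(&expedited_started);
	}
	/* Success: advance expedited_done to our snapshot, unless someone
	 * who started after us already pushed it further. */
	do {
		s = atomic_load(&expedited_done);
		if ((int)((unsigned)s - (unsigned)snap) >= 0)
			break;
	} while (!atomic_compare_exchange_weak(&expedited_done, &s, snap));
}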
1890
1891#endif /* #else #ifndef CONFIG_SMP */
1892
1004#if !defined(CONFIG_RCU_FAST_NO_HZ) 1893#if !defined(CONFIG_RCU_FAST_NO_HZ)
1005 1894
1006/* 1895/*
@@ -1047,14 +1936,13 @@ static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
1047 * 1936 *
1048 * Because it is not legal to invoke rcu_process_callbacks() with irqs 1937 * Because it is not legal to invoke rcu_process_callbacks() with irqs
1049 * disabled, we do one pass of force_quiescent_state(), then do a 1938 * disabled, we do one pass of force_quiescent_state(), then do a
1050 * raise_softirq() to cause rcu_process_callbacks() to be invoked later. 1939 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
1051 * The per-cpu rcu_dyntick_drain variable controls the sequencing. 1940 * later. The per-cpu rcu_dyntick_drain variable controls the sequencing.
1052 */ 1941 */
1053int rcu_needs_cpu(int cpu) 1942int rcu_needs_cpu(int cpu)
1054{ 1943{
1055 int c = 0; 1944 int c = 0;
1056 int snap; 1945 int snap;
1057 int snap_nmi;
1058 int thatcpu; 1946 int thatcpu;
1059 1947
1060 /* Check for being in the holdoff period. */ 1948 /* Check for being in the holdoff period. */
@@ -1065,10 +1953,10 @@ int rcu_needs_cpu(int cpu)
1065 for_each_online_cpu(thatcpu) { 1953 for_each_online_cpu(thatcpu) {
1066 if (thatcpu == cpu) 1954 if (thatcpu == cpu)
1067 continue; 1955 continue;
1068 snap = per_cpu(rcu_dynticks, thatcpu).dynticks; 1956 snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
1069 snap_nmi = per_cpu(rcu_dynticks, thatcpu).dynticks_nmi; 1957 thatcpu).dynticks);
1070 smp_mb(); /* Order sampling of snap with end of grace period. */ 1958 smp_mb(); /* Order sampling of snap with end of grace period. */
1071 if (((snap & 0x1) != 0) || ((snap_nmi & 0x1) != 0)) { 1959 if ((snap & 0x1) != 0) {
1072 per_cpu(rcu_dyntick_drain, cpu) = 0; 1960 per_cpu(rcu_dyntick_drain, cpu) = 0;
1073 per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1; 1961 per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
1074 return rcu_needs_cpu_quick_check(cpu); 1962 return rcu_needs_cpu_quick_check(cpu);
@@ -1099,7 +1987,7 @@ int rcu_needs_cpu(int cpu)
1099 1987
1100 /* If RCU callbacks are still pending, RCU still needs this CPU. */ 1988 /* If RCU callbacks are still pending, RCU still needs this CPU. */
1101 if (c) 1989 if (c)
1102 raise_softirq(RCU_SOFTIRQ); 1990 invoke_rcu_core();
1103 return c; 1991 return c;
1104} 1992}
1105 1993
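
The hunk above replaces the separate dynticks/dynticks_nmi samples with a single atomic counter whose low bit indicates that the CPU is outside dyntick-idle mode; atomic_add_return(0, ...) both reads it and supplies a full memory barrier. A toy user-space model of that check, with hypothetical names, might look like this:

#include <stdatomic.h>
#include <stdbool.h>

/* Toy model of the dyntick counter sampled above: it is incremented on
 * every idle entry and exit, so an odd value means the CPU is currently
 * outside dyntick-idle mode.  Adding zero mimics atomic_add_return(0, ...),
 * reading the counter with full ordering. */
static atomic_int dynticks;

static bool cpu_outside_dyntick_idle(void)
{
	int snap = atomic_fetch_add(&dynticks, 0);	/* read with full ordering */

	return (snap & 0x1) != 0;
}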