Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--  kernel/rcutree_plugin.h | 150
1 file changed, 83 insertions(+), 67 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 8aafbb80b8b0..4b9b9f8a4184 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -27,6 +27,14 @@
 #include <linux/delay.h>
 #include <linux/stop_machine.h>
 
+#define RCU_KTHREAD_PRIO 1
+
+#ifdef CONFIG_RCU_BOOST
+#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
+#else
+#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
+#endif
+
 /*
  * Check the RCU kernel configuration parameters and print informative
  * messages about anything out of the ordinary. If you like #ifdef, you
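
The RCU_BOOST_PRIO fallback added above lets later code use a single priority symbol whether or not RCU priority boosting is configured. Below is a standalone sketch of the same fallback-define pattern; the CONFIG_* values are stand-ins for Kconfig output, not taken from a real .config:

#include <stdio.h>

/* Stand-ins for Kconfig output; in the kernel these come from generated headers. */
#define CONFIG_RCU_BOOST 1
#define CONFIG_RCU_BOOST_PRIO 5

#define RCU_KTHREAD_PRIO 1

#ifdef CONFIG_RCU_BOOST
#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO	/* boost kthreads use the configured priority */
#else
#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO		/* no boosting: fall back to the normal kthread priority */
#endif

int main(void)
{
	printf("RCU_KTHREAD_PRIO=%d RCU_BOOST_PRIO=%d\n",
	       RCU_KTHREAD_PRIO, RCU_BOOST_PRIO);
	return 0;
}
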
@@ -64,7 +72,7 @@ static void __init rcu_bootup_announce_oddness(void)
 
 #ifdef CONFIG_TREE_PREEMPT_RCU
 
-struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
+struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
 DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
 static struct rcu_state *rcu_state = &rcu_preempt_state;
 
@@ -122,9 +130,11 @@ static void rcu_preempt_qs(int cpu)
 {
 	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
 
-	rdp->passed_quiesc_completed = rdp->gpnum - 1;
+	rdp->passed_quiesce_gpnum = rdp->gpnum;
 	barrier();
-	rdp->passed_quiesc = 1;
+	if (rdp->passed_quiesce == 0)
+		trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
+	rdp->passed_quiesce = 1;
 	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
 }
 
@@ -190,6 +200,11 @@ static void rcu_preempt_note_context_switch(int cpu)
 			if (rnp->qsmask & rdp->grpmask)
 				rnp->gp_tasks = &t->rcu_node_entry;
 		}
+		trace_rcu_preempt_task(rdp->rsp->name,
+				       t->pid,
+				       (rnp->qsmask & rdp->grpmask)
+				       ? rnp->gpnum
+				       : rnp->gpnum + 1);
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	} else if (t->rcu_read_lock_nesting < 0 &&
 		   t->rcu_read_unlock_special) {
@@ -299,6 +314,9 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 	int empty_exp;
 	unsigned long flags;
 	struct list_head *np;
+#ifdef CONFIG_RCU_BOOST
+	struct rt_mutex *rbmp = NULL;
+#endif /* #ifdef CONFIG_RCU_BOOST */
 	struct rcu_node *rnp;
 	int special;
 
@@ -344,6 +362,9 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
 		np = rcu_next_node_entry(t, rnp);
 		list_del_init(&t->rcu_node_entry);
+		t->rcu_blocked_node = NULL;
+		trace_rcu_unlock_preempted_task("rcu_preempt",
+						rnp->gpnum, t->pid);
 		if (&t->rcu_node_entry == rnp->gp_tasks)
 			rnp->gp_tasks = np;
 		if (&t->rcu_node_entry == rnp->exp_tasks)
@@ -351,30 +372,34 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 #ifdef CONFIG_RCU_BOOST
 		if (&t->rcu_node_entry == rnp->boost_tasks)
 			rnp->boost_tasks = np;
-		/* Snapshot and clear ->rcu_boosted with rcu_node lock held. */
-		if (t->rcu_boosted) {
-			special |= RCU_READ_UNLOCK_BOOSTED;
-			t->rcu_boosted = 0;
+		/* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
+		if (t->rcu_boost_mutex) {
+			rbmp = t->rcu_boost_mutex;
+			t->rcu_boost_mutex = NULL;
 		}
 #endif /* #ifdef CONFIG_RCU_BOOST */
-		t->rcu_blocked_node = NULL;
 
 		/*
 		 * If this was the last task on the current list, and if
 		 * we aren't waiting on any CPUs, report the quiescent state.
 		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
 		 */
-		if (empty)
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
-		else
+		if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
+			trace_rcu_quiescent_state_report("preempt_rcu",
+							 rnp->gpnum,
+							 0, rnp->qsmask,
+							 rnp->level,
+							 rnp->grplo,
+							 rnp->grphi,
+							 !!rnp->gp_tasks);
 			rcu_report_unblock_qs_rnp(rnp, flags);
+		} else
+			raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
 #ifdef CONFIG_RCU_BOOST
 		/* Unboost if we were boosted. */
-		if (special & RCU_READ_UNLOCK_BOOSTED) {
-			rt_mutex_unlock(t->rcu_boost_mutex);
-			t->rcu_boost_mutex = NULL;
-		}
+		if (rbmp)
+			rt_mutex_unlock(rbmp);
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
 		/*
@@ -399,10 +424,10 @@ void __rcu_read_unlock(void)
 {
 	struct task_struct *t = current;
 
-	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
 	if (t->rcu_read_lock_nesting != 1)
 		--t->rcu_read_lock_nesting;
 	else {
+		barrier();  /* critical section before exit code. */
 		t->rcu_read_lock_nesting = INT_MIN;
 		barrier();  /* assign before ->rcu_read_unlock_special load */
 		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
@@ -466,16 +491,20 @@ static void rcu_print_detail_task_stall(struct rcu_state *rsp)
  * Scan the current list of tasks blocked within RCU read-side critical
  * sections, printing out the tid of each.
  */
-static void rcu_print_task_stall(struct rcu_node *rnp)
+static int rcu_print_task_stall(struct rcu_node *rnp)
 {
 	struct task_struct *t;
+	int ndetected = 0;
 
 	if (!rcu_preempt_blocked_readers_cgp(rnp))
-		return;
+		return 0;
 	t = list_entry(rnp->gp_tasks,
 		       struct task_struct, rcu_node_entry);
-	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
+	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
 		printk(" P%d", t->pid);
+		ndetected++;
+	}
+	return ndetected;
 }
 
 /*
@@ -656,18 +685,9 @@ EXPORT_SYMBOL_GPL(call_rcu);
  */
 void synchronize_rcu(void)
 {
-	struct rcu_synchronize rcu;
-
 	if (!rcu_scheduler_active)
 		return;
-
-	init_rcu_head_on_stack(&rcu.head);
-	init_completion(&rcu.completion);
-	/* Will wake me after RCU finished. */
-	call_rcu(&rcu.head, wakeme_after_rcu);
-	/* Wait for it. */
-	wait_for_completion(&rcu.completion);
-	destroy_rcu_head_on_stack(&rcu.head);
+	wait_rcu_gp(call_rcu);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 
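
The hunk above folds synchronize_rcu()'s open-coded wait into the shared wait_rcu_gp() helper, which keeps the same idea: queue a callback whose only job is to signal a completion, then block until it fires. Here is a userspace sketch of that callback-plus-completion pattern, using pthreads in place of the kernel's completion API; all names below are illustrative stand-ins, not kernel interfaces:

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's struct completion. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = 0;
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

/* Stand-in for the grace-period machinery: the "callback" runs in a helper thread. */
static void *fake_gp_thread(void *arg)
{
	complete(arg);		/* plays the role of wakeme_after_rcu() */
	return NULL;
}

/* The shape wait_rcu_gp() factors out: post a wake-up callback, then wait for it. */
static void wait_fake_gp(void)
{
	struct completion done;
	pthread_t t;

	init_completion(&done);
	pthread_create(&t, NULL, fake_gp_thread, &done);	/* like call_rcu(&rcu.head, wakeme_after_rcu) */
	wait_for_completion(&done);				/* like wait_for_completion(&rcu.completion) */
	pthread_join(&t, NULL);
}

int main(void)
{
	wait_fake_gp();		/* like synchronize_rcu(): returns only after the "grace period" */
	printf("fake grace period complete\n");
	return 0;
}

Built with cc -pthread, wait_fake_gp() blocks exactly until the helper's completion fires, which is the behavior synchronize_rcu() now delegates to wait_rcu_gp().
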
@@ -968,8 +988,9 @@ static void rcu_print_detail_task_stall(struct rcu_state *rsp)
  * Because preemptible RCU does not exist, we never have to check for
  * tasks blocked within RCU read-side critical sections.
  */
-static void rcu_print_task_stall(struct rcu_node *rnp)
+static int rcu_print_task_stall(struct rcu_node *rnp)
 {
+	return 0;
 }
 
 /*
@@ -1136,6 +1157,8 @@ static void rcu_initiate_boost_trace(struct rcu_node *rnp)
 
 #endif /* #else #ifdef CONFIG_RCU_TRACE */
 
+static struct lock_class_key rcu_boost_class;
+
 /*
  * Carry out RCU priority boosting on the task indicated by ->exp_tasks
  * or ->boost_tasks, advancing the pointer to the next task in the
@@ -1198,8 +1221,10 @@ static int rcu_boost(struct rcu_node *rnp)
 	 */
 	t = container_of(tb, struct task_struct, rcu_node_entry);
 	rt_mutex_init_proxy_locked(&mtx, t);
+	/* Avoid lockdep false positives. This rt_mutex is its own thing. */
+	lockdep_set_class_and_name(&mtx.wait_lock, &rcu_boost_class,
+				   "rcu_boost_mutex");
 	t->rcu_boost_mutex = &mtx;
-	t->rcu_boosted = 1;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
 	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */
@@ -1228,9 +1253,12 @@ static int rcu_boost_kthread(void *arg)
 	int spincnt = 0;
 	int more2boost;
 
+	trace_rcu_utilization("Start boost kthread@init");
 	for (;;) {
 		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
+		trace_rcu_utilization("End boost kthread@rcu_wait");
 		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
+		trace_rcu_utilization("Start boost kthread@rcu_wait");
 		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
 		more2boost = rcu_boost(rnp);
 		if (more2boost)
@@ -1238,11 +1266,14 @@ static int rcu_boost_kthread(void *arg)
 		else
 			spincnt = 0;
 		if (spincnt > 10) {
+			trace_rcu_utilization("End boost kthread@rcu_yield");
 			rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp);
+			trace_rcu_utilization("Start boost kthread@rcu_yield");
 			spincnt = 0;
 		}
 	}
 	/* NOTREACHED */
+	trace_rcu_utilization("End boost kthread@notreached");
 	return 0;
 }
 
@@ -1291,11 +1322,9 @@ static void invoke_rcu_callbacks_kthread(void)
 
 	local_irq_save(flags);
 	__this_cpu_write(rcu_cpu_has_work, 1);
-	if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
-		local_irq_restore(flags);
-		return;
-	}
-	wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
+	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
+	    current != __this_cpu_read(rcu_cpu_kthread_task))
+		wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
 	local_irq_restore(flags);
 }
 
@@ -1343,13 +1372,13 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 	if (rnp->boost_kthread_task != NULL)
 		return 0;
 	t = kthread_create(rcu_boost_kthread, (void *)rnp,
-			   "rcub%d", rnp_index);
+			   "rcub/%d", rnp_index);
 	if (IS_ERR(t))
 		return PTR_ERR(t);
 	raw_spin_lock_irqsave(&rnp->lock, flags);
 	rnp->boost_kthread_task = t;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
-	sp.sched_priority = RCU_KTHREAD_PRIO;
+	sp.sched_priority = RCU_BOOST_PRIO;
 	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
 	return 0;
@@ -1444,6 +1473,7 @@ static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
 {
 	struct sched_param sp;
 	struct timer_list yield_timer;
+	int prio = current->rt_priority;
 
 	setup_timer_on_stack(&yield_timer, f, arg);
 	mod_timer(&yield_timer, jiffies + 2);
@@ -1451,7 +1481,8 @@ static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
 	sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
 	set_user_nice(current, 19);
 	schedule();
-	sp.sched_priority = RCU_KTHREAD_PRIO;
+	set_user_nice(current, 0);
+	sp.sched_priority = prio;
 	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
 	del_timer(&yield_timer);
 }
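
The two rcu_yield() hunks above stop assuming a fixed RCU_KTHREAD_PRIO: the kthread's current RT priority is saved before dropping to SCHED_NORMAL at nice 19, and is restored afterwards with nice reset to 0, so boost kthreads running at RCU_BOOST_PRIO come back at the right level. A userspace sketch of the same save/drop/restore idea using POSIX scheduling calls follows; the yield policy here is invented for the example, and switching back to SCHED_FIFO requires the usual privileges:

#include <sched.h>
#include <stdio.h>
#include <sys/resource.h>

static void yield_and_restore(void)
{
	struct sched_param saved, normal = { .sched_priority = 0 };
	int policy = sched_getscheduler(0);	/* remember the caller's policy... */

	if (policy < 0 || sched_getparam(0, &saved) != 0)
		return;				/* ...and priority; bail out if we can't */

	/* Drop to the normal scheduler at low priority and let other work run. */
	sched_setscheduler(0, SCHED_OTHER, &normal);
	setpriority(PRIO_PROCESS, 0, 19);
	sched_yield();

	/* Put nice back to 0 and restore the saved policy and priority. */
	setpriority(PRIO_PROCESS, 0, 0);
	sched_setscheduler(0, policy, &saved);
}

int main(void)
{
	yield_and_restore();
	printf("back at policy %d\n", sched_getscheduler(0));
	return 0;
}
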
@@ -1489,7 +1520,8 @@ static int rcu_cpu_kthread_should_stop(int cpu)
 
 /*
  * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
- * earlier RCU softirq.
+ * RCU softirq used in flavors and configurations of RCU that do not
+ * support RCU priority boosting.
  */
 static int rcu_cpu_kthread(void *arg)
 {
@@ -1500,9 +1532,12 @@ static int rcu_cpu_kthread(void *arg)
 	char work;
 	char *workp = &per_cpu(rcu_cpu_has_work, cpu);
 
+	trace_rcu_utilization("Start CPU kthread@init");
 	for (;;) {
 		*statusp = RCU_KTHREAD_WAITING;
+		trace_rcu_utilization("End CPU kthread@rcu_wait");
 		rcu_wait(*workp != 0 || kthread_should_stop());
+		trace_rcu_utilization("Start CPU kthread@rcu_wait");
 		local_bh_disable();
 		if (rcu_cpu_kthread_should_stop(cpu)) {
 			local_bh_enable();
@@ -1523,11 +1558,14 @@ static int rcu_cpu_kthread(void *arg)
 			spincnt = 0;
 		if (spincnt > 10) {
 			*statusp = RCU_KTHREAD_YIELDING;
+			trace_rcu_utilization("End CPU kthread@rcu_yield");
 			rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
+			trace_rcu_utilization("Start CPU kthread@rcu_yield");
 			spincnt = 0;
 		}
 	}
 	*statusp = RCU_KTHREAD_STOPPED;
+	trace_rcu_utilization("End CPU kthread@term");
 	return 0;
 }
 
@@ -1560,7 +1598,10 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
 	if (!rcu_scheduler_fully_active ||
 	    per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
 		return 0;
-	t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
+	t = kthread_create_on_node(rcu_cpu_kthread,
+				   (void *)(long)cpu,
+				   cpu_to_node(cpu),
+				   "rcuc/%d", cpu);
 	if (IS_ERR(t))
 		return PTR_ERR(t);
 	if (cpu_online(cpu))
@@ -1669,7 +1710,7 @@ static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
 		return 0;
 	if (rnp->node_kthread_task == NULL) {
 		t = kthread_create(rcu_node_kthread, (void *)rnp,
-				   "rcun%d", rnp_index);
+				   "rcun/%d", rnp_index);
 		if (IS_ERR(t))
 			return PTR_ERR(t);
 		raw_spin_lock_irqsave(&rnp->lock, flags);
@@ -1907,15 +1948,6 @@ int rcu_needs_cpu(int cpu)
 	return rcu_needs_cpu_quick_check(cpu);
 }
 
-/*
- * Check to see if we need to continue a callback-flush operations to
- * allow the last CPU to enter dyntick-idle mode.  But fast dyntick-idle
- * entry is not configured, so we never do need to.
- */
-static void rcu_needs_cpu_flush(void)
-{
-}
-
 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
 #define RCU_NEEDS_CPU_FLUSHES 5
@@ -1991,20 +2023,4 @@ int rcu_needs_cpu(int cpu)
 	return c;
 }
 
-/*
- * Check to see if we need to continue a callback-flush operations to
- * allow the last CPU to enter dyntick-idle mode.
- */
-static void rcu_needs_cpu_flush(void)
-{
-	int cpu = smp_processor_id();
-	unsigned long flags;
-
-	if (per_cpu(rcu_dyntick_drain, cpu) <= 0)
-		return;
-	local_irq_save(flags);
-	(void)rcu_needs_cpu(cpu);
-	local_irq_restore(flags);
-}
-
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */