author	Thomas Gleixner <tglx@linutronix.de>	2012-07-16 06:42:35 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2012-08-13 11:01:06 -0400
commit	5d01bbd111d6ff9ea9d9847774f66dff39633776 (patch)
tree	77cbce5ff479b3e70f95a29cc88ce50e025f9078
parent	3bf671af14d591ede9251acb0085e8017f3705e7 (diff)
rcu: Yield simpler
The rcu_yield() code is amazing. It's there to avoid starvation of the
system when lots of (boosting) work is to be done.

Now looking at the code, its functionality is:

 - Make the thread SCHED_OTHER and very nice, i.e. get it out of the way
 - Arm a timer with 2 ticks
 - schedule()

If the system goes idle the rcu task returns, regains SCHED_FIFO and
plugs on. If the system stays busy the timer fires and wakes a per-node
kthread which in turn makes the per-cpu thread SCHED_FIFO and brings it
back onto the cpu. For the boosting thread the "make it FIFO" bit is
missing and it just runs some magic boost checks.

Now this is a lot of code with extra threads and complexity. It's way
simpler to let the tasks, when they detect overload, schedule away for
2 ticks and defer the normal wakeup as long as they are in yielded
state and the cpu is not idle. That solves the same problem, and the
only difference is that when the cpu goes idle it's not guaranteed that
the thread returns right away; but it won't be out for longer than two
ticks, so no harm is done. If that's an issue, then it is way simpler
just to wake the task from idle, as RCU has callbacks there anyway.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Namhyung Kim <namhyung@kernel.org>
Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/20120716103948.131256723@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
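[Editor's note: the following is a minimal C sketch of the yield scheme the patch below introduces, condensed from its hunks. The loop structure and the work/wait phases are heavily simplified; RCU_KTHREAD_YIELDING, boost_kthread_status and struct rcu_node are the kernel's RCU-internal names as they appear in the diff.]

	/*
	 * Wake the kthread unless it is deliberately yielding; a yielding
	 * kthread is only woken when the wakeup comes from the idle task.
	 */
	static void rcu_wake_cond(struct task_struct *t, int status)
	{
		if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
			wake_up_process(t);
	}

	/* Heavily simplified kthread main loop. */
	static int rcu_boost_kthread(void *arg)
	{
		struct rcu_node *rnp = arg;
		int spincnt = 0;

		for (;;) {
			/* ... wait for and perform the boosting work ... */
			if (++spincnt > 10) {
				/*
				 * Overloaded: record the yielding state and
				 * simply sleep for two ticks instead of dropping
				 * to SCHED_OTHER and arming an on-stack timer.
				 * (The real loop resets the status when it goes
				 * back to waiting for work.)
				 */
				rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
				schedule_timeout_interruptible(2);
				spincnt = 0;
			}
		}
		return 0;
	}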
-rw-r--r--  kernel/rcutree.c        |   8
-rw-r--r--  kernel/rcutree.h        |   7
-rw-r--r--  kernel/rcutree_plugin.h | 210
3 files changed, 41 insertions, 184 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index f280e542e3e9..f08ee3bc5741 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -139,7 +139,7 @@ DEFINE_PER_CPU(char, rcu_cpu_has_work);
 
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
-static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
+static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
 static void invoke_rcu_core(void);
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
 
@@ -1469,7 +1469,7 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 
 	/* Adjust any no-longer-needed kthreads. */
 	rcu_stop_cpu_kthread(cpu);
-	rcu_node_kthread_setaffinity(rnp, -1);
+	rcu_boost_kthread_setaffinity(rnp, -1);
 
 	/* Remove the dead CPU from the bitmasks in the rcu_node hierarchy. */
 
@@ -2594,11 +2594,11 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
 		break;
 	case CPU_ONLINE:
 	case CPU_DOWN_FAILED:
-		rcu_node_kthread_setaffinity(rnp, -1);
+		rcu_boost_kthread_setaffinity(rnp, -1);
 		rcu_cpu_kthread_setrt(cpu, 1);
 		break;
 	case CPU_DOWN_PREPARE:
-		rcu_node_kthread_setaffinity(rnp, cpu);
+		rcu_boost_kthread_setaffinity(rnp, cpu);
 		rcu_cpu_kthread_setrt(cpu, 0);
 		break;
 	case CPU_DYING:
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 4d29169f2124..f08176172546 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -491,13 +491,8 @@ static void invoke_rcu_callbacks_kthread(void);
 static bool rcu_is_callbacks_kthread(void);
 #ifdef CONFIG_RCU_BOOST
 static void rcu_preempt_do_callbacks(void);
-static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
-					  cpumask_var_t cm);
-static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
-						 struct rcu_node *rnp,
-						 int rnp_index);
-static void invoke_rcu_node_kthread(struct rcu_node *rnp);
-static void rcu_yield(void (*f)(unsigned long), unsigned long arg);
+static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
+						 struct rcu_node *rnp);
 #endif /* #ifdef CONFIG_RCU_BOOST */
 static void rcu_cpu_kthread_setrt(int cpu, int to_rt);
 static void __cpuinit rcu_prepare_kthreads(int cpu);
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 7f3244c0df01..0f8b5ec64a7d 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1069,6 +1069,16 @@ static void rcu_initiate_boost_trace(struct rcu_node *rnp)
 
 #endif /* #else #ifdef CONFIG_RCU_TRACE */
 
+static void rcu_wake_cond(struct task_struct *t, int status)
+{
+	/*
+	 * If the thread is yielding, only wake it when this
+	 * is invoked from idle
+	 */
+	if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
+		wake_up_process(t);
+}
+
 /*
  * Carry out RCU priority boosting on the task indicated by ->exp_tasks
  * or ->boost_tasks, advancing the pointer to the next task in the
@@ -1141,17 +1151,6 @@ static int rcu_boost(struct rcu_node *rnp)
 }
 
 /*
- * Timer handler to initiate waking up of boost kthreads that
- * have yielded the CPU due to excessive numbers of tasks to
- * boost.  We wake up the per-rcu_node kthread, which in turn
- * will wake up the booster kthread.
- */
-static void rcu_boost_kthread_timer(unsigned long arg)
-{
-	invoke_rcu_node_kthread((struct rcu_node *)arg);
-}
-
-/*
  * Priority-boosting kthread.  One per leaf rcu_node and one for the
  * root rcu_node.
  */
@@ -1174,8 +1173,9 @@ static int rcu_boost_kthread(void *arg)
 		else
 			spincnt = 0;
 		if (spincnt > 10) {
+			rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
 			trace_rcu_utilization("End boost kthread@rcu_yield");
-			rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp);
+			schedule_timeout_interruptible(2);
 			trace_rcu_utilization("Start boost kthread@rcu_yield");
 			spincnt = 0;
 		}
@@ -1213,8 +1213,8 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 		rnp->boost_tasks = rnp->gp_tasks;
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		t = rnp->boost_kthread_task;
-		if (t != NULL)
-			wake_up_process(t);
+		if (t)
+			rcu_wake_cond(t, rnp->boost_kthread_status);
 	} else {
 		rcu_initiate_boost_trace(rnp);
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
@@ -1231,8 +1231,10 @@ static void invoke_rcu_callbacks_kthread(void)
 	local_irq_save(flags);
 	__this_cpu_write(rcu_cpu_has_work, 1);
 	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
-	    current != __this_cpu_read(rcu_cpu_kthread_task))
-		wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
+	    current != __this_cpu_read(rcu_cpu_kthread_task)) {
+		rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
+			      __this_cpu_read(rcu_cpu_kthread_status));
+	}
 	local_irq_restore(flags);
 }
 
@@ -1245,21 +1247,6 @@ static bool rcu_is_callbacks_kthread(void)
 	return __get_cpu_var(rcu_cpu_kthread_task) == current;
 }
 
-/*
- * Set the affinity of the boost kthread.  The CPU-hotplug locks are
- * held, so no one should be messing with the existence of the boost
- * kthread.
- */
-static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
-					  cpumask_var_t cm)
-{
-	struct task_struct *t;
-
-	t = rnp->boost_kthread_task;
-	if (t != NULL)
-		set_cpus_allowed_ptr(rnp->boost_kthread_task, cm);
-}
-
 #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
 
 /*
@@ -1276,15 +1263,19 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
  * Returns zero if all is well, a negated errno otherwise.
  */
 static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
-						 struct rcu_node *rnp,
-						 int rnp_index)
+						 struct rcu_node *rnp)
 {
+	int rnp_index = rnp - &rsp->node[0];
 	unsigned long flags;
 	struct sched_param sp;
 	struct task_struct *t;
 
 	if (&rcu_preempt_state != rsp)
 		return 0;
+
+	if (!rcu_scheduler_fully_active || rnp->qsmaskinit == 0)
+		return 0;
+
 	rsp->boost = 1;
 	if (rnp->boost_kthread_task != NULL)
 		return 0;
@@ -1328,20 +1319,6 @@ static void rcu_kthread_do_work(void)
 }
 
 /*
- * Wake up the specified per-rcu_node-structure kthread.
- * Because the per-rcu_node kthreads are immortal, we don't need
- * to do anything to keep them alive.
- */
-static void invoke_rcu_node_kthread(struct rcu_node *rnp)
-{
-	struct task_struct *t;
-
-	t = rnp->node_kthread_task;
-	if (t != NULL)
-		wake_up_process(t);
-}
-
-/*
  * Set the specified CPU's kthread to run RT or not, as specified by
  * the to_rt argument.  The CPU-hotplug locks are held, so the task
  * is not going away.
@@ -1366,45 +1343,6 @@ static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
 }
 
 /*
- * Timer handler to initiate the waking up of per-CPU kthreads that
- * have yielded the CPU due to excess numbers of RCU callbacks.
- * We wake up the per-rcu_node kthread, which in turn will wake up
- * the booster kthread.
- */
-static void rcu_cpu_kthread_timer(unsigned long arg)
-{
-	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
-	struct rcu_node *rnp = rdp->mynode;
-
-	atomic_or(rdp->grpmask, &rnp->wakemask);
-	invoke_rcu_node_kthread(rnp);
-}
-
-/*
- * Drop to non-real-time priority and yield, but only after posting a
- * timer that will cause us to regain our real-time priority if we
- * remain preempted.  Either way, we restore our real-time priority
- * before returning.
- */
-static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
-{
-	struct sched_param sp;
-	struct timer_list yield_timer;
-	int prio = current->rt_priority;
-
-	setup_timer_on_stack(&yield_timer, f, arg);
-	mod_timer(&yield_timer, jiffies + 2);
-	sp.sched_priority = 0;
-	sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
-	set_user_nice(current, 19);
-	schedule();
-	set_user_nice(current, 0);
-	sp.sched_priority = prio;
-	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
-	del_timer(&yield_timer);
-}
-
-/*
  * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
  * This can happen while the corresponding CPU is either coming online
  * or going offline.  We cannot wait until the CPU is fully online
@@ -1476,7 +1414,7 @@ static int rcu_cpu_kthread(void *arg)
 		if (spincnt > 10) {
 			*statusp = RCU_KTHREAD_YIELDING;
 			trace_rcu_utilization("End CPU kthread@rcu_yield");
-			rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
+			schedule_timeout_interruptible(2);
 			trace_rcu_utilization("Start CPU kthread@rcu_yield");
 			spincnt = 0;
 		}
@@ -1533,48 +1471,6 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
 }
 
 /*
- * Per-rcu_node kthread, which is in charge of waking up the per-CPU
- * kthreads when needed.  We ignore requests to wake up kthreads
- * for offline CPUs, which is OK because force_quiescent_state()
- * takes care of this case.
- */
-static int rcu_node_kthread(void *arg)
-{
-	int cpu;
-	unsigned long flags;
-	unsigned long mask;
-	struct rcu_node *rnp = (struct rcu_node *)arg;
-	struct sched_param sp;
-	struct task_struct *t;
-
-	for (;;) {
-		rnp->node_kthread_status = RCU_KTHREAD_WAITING;
-		rcu_wait(atomic_read(&rnp->wakemask) != 0);
-		rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
-		raw_spin_lock_irqsave(&rnp->lock, flags);
-		mask = atomic_xchg(&rnp->wakemask, 0);
-		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
-		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
-			if ((mask & 0x1) == 0)
-				continue;
-			preempt_disable();
-			t = per_cpu(rcu_cpu_kthread_task, cpu);
-			if (!cpu_online(cpu) || t == NULL) {
-				preempt_enable();
-				continue;
-			}
-			per_cpu(rcu_cpu_has_work, cpu) = 1;
-			sp.sched_priority = RCU_KTHREAD_PRIO;
-			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-			preempt_enable();
-		}
-	}
-	/* NOTREACHED */
-	rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
-	return 0;
-}
-
-/*
  * Set the per-rcu_node kthread's affinity to cover all CPUs that are
  * served by the rcu_node in question.  The CPU hotplug lock is still
  * held, so the value of rnp->qsmaskinit will be stable.
@@ -1583,17 +1479,17 @@ static int rcu_node_kthread(void *arg)
  * no outgoing CPU.  If there are no CPUs left in the affinity set,
  * this function allows the kthread to execute on any CPU.
  */
-static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 {
+	struct task_struct *t = rnp->boost_kthread_task;
+	unsigned long mask = rnp->qsmaskinit;
 	cpumask_var_t cm;
 	int cpu;
-	unsigned long mask = rnp->qsmaskinit;
 
-	if (rnp->node_kthread_task == NULL)
+	if (!t)
 		return;
-	if (!alloc_cpumask_var(&cm, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
 		return;
-	cpumask_clear(cm);
 	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
 		if ((mask & 0x1) && cpu != outgoingcpu)
 			cpumask_set_cpu(cpu, cm);
@@ -1603,50 +1499,17 @@ static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 		cpumask_clear_cpu(cpu, cm);
 		WARN_ON_ONCE(cpumask_weight(cm) == 0);
 	}
-	set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
-	rcu_boost_kthread_setaffinity(rnp, cm);
+	set_cpus_allowed_ptr(t, cm);
 	free_cpumask_var(cm);
 }
 
 /*
- * Spawn a per-rcu_node kthread, setting priority and affinity.
- * Called during boot before online/offline can happen, or, if
- * during runtime, with the main CPU-hotplug locks held.  So only
- * one of these can be executing at a time.
- */
-static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
-						struct rcu_node *rnp)
-{
-	unsigned long flags;
-	int rnp_index = rnp - &rsp->node[0];
-	struct sched_param sp;
-	struct task_struct *t;
-
-	if (!rcu_scheduler_fully_active ||
-	    rnp->qsmaskinit == 0)
-		return 0;
-	if (rnp->node_kthread_task == NULL) {
-		t = kthread_create(rcu_node_kthread, (void *)rnp,
-				   "rcun/%d", rnp_index);
-		if (IS_ERR(t))
-			return PTR_ERR(t);
-		raw_spin_lock_irqsave(&rnp->lock, flags);
-		rnp->node_kthread_task = t;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
-		sp.sched_priority = 99;
-		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-		wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
-	}
-	return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
-}
-
-/*
  * Spawn all kthreads -- called as soon as the scheduler is running.
  */
 static int __init rcu_spawn_kthreads(void)
 {
-	int cpu;
 	struct rcu_node *rnp;
+	int cpu;
 
 	rcu_scheduler_fully_active = 1;
 	for_each_possible_cpu(cpu) {
@@ -1655,10 +1518,10 @@ static int __init rcu_spawn_kthreads(void)
 		(void)rcu_spawn_one_cpu_kthread(cpu);
 	}
 	rnp = rcu_get_root(rcu_state);
-	(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+	(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
 	if (NUM_RCU_NODES > 1) {
 		rcu_for_each_leaf_node(rcu_state, rnp)
-			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+			(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
 	}
 	return 0;
 }
@@ -1672,8 +1535,7 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
 	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
 	if (rcu_scheduler_fully_active) {
 		(void)rcu_spawn_one_cpu_kthread(cpu);
-		if (rnp->node_kthread_task == NULL)
-			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+		(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
 	}
 }
 
@@ -1706,7 +1568,7 @@ static void rcu_stop_cpu_kthread(int cpu)
 
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
-static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 {
 }
 