path: root/kernel/rcutree_plugin.h
author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-09-25 13:01:45 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-09-25 13:01:45 -0400
commit		5217192b85480353aeeb395574e60d0db04f3676 (patch)
tree		a45b440623a8fc55ece18b72e40829b2d9815d74 /kernel/rcutree_plugin.h
parent		bda4ec9f6a7d7b249c7b14baa553731efedce300 (diff)
parent		bff4a394795add6b919debc009f72b7607f5d4bf (diff)
Merge remote-tracking branch 'tip/smp/hotplug' into next.2012.09.25b
The conflicts between kernel/rcutree.h and kernel/rcutree_plugin.h were due to adjacent insertions and deletions, which were resolved by simply accepting the changes on both branches.
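For context, the 'tip/smp/hotplug' side of this merge moves RCU's per-CPU kthreads onto the generic smpboot facility (see the rcu_cpu_thread_spec hunk in the diff below). The fragment that follows is only a minimal sketch of that registration pattern, not code from this commit; the my_* identifiers are hypothetical placeholders.

#include <linux/smpboot.h>
#include <linux/percpu.h>
#include <linux/sched.h>

/* Hypothetical per-CPU state; the real patch uses rcu_cpu_kthread_task etc. */
static DEFINE_PER_CPU(struct task_struct *, my_kthread_task);
static DEFINE_PER_CPU(int, my_cpu_has_work);

static int my_thread_should_run(unsigned int cpu)
{
	/* Nonzero means the per-CPU thread has work and should be run. */
	return per_cpu(my_cpu_has_work, cpu);
}

static void my_thread_fn(unsigned int cpu)
{
	/* Invoked in the per-CPU kthread whenever should_run() said yes. */
	per_cpu(my_cpu_has_work, cpu) = 0;
	/* ... process the pending work here ... */
}

static struct smp_hotplug_thread my_thread_spec = {
	.store			= &my_kthread_task,
	.thread_should_run	= my_thread_should_run,
	.thread_fn		= my_thread_fn,
	.thread_comm		= "mythread/%u",
};

static int __init my_threads_init(void)
{
	/* The smpboot core spawns, parks, and unparks one thread per CPU. */
	return smpboot_register_percpu_thread(&my_thread_spec);
}

With the spec registered, the smpboot core owns thread creation and CPU-hotplug handling, which is what allows the diff below to delete rcu_spawn_one_cpu_kthread(), rcu_node_kthread(), rcu_stop_cpu_kthread(), and the open-coded yield/park logic.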
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--	kernel/rcutree_plugin.h	403
1 file changed, 71 insertions(+), 332 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 46d7d6cf16db..9c71c1b18e03 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -26,6 +26,7 @@
 
 #include <linux/delay.h>
 #include <linux/oom.h>
+#include <linux/smpboot.h>
 
 #define RCU_KTHREAD_PRIO 1
 
@@ -1090,6 +1091,16 @@ static void rcu_initiate_boost_trace(struct rcu_node *rnp)
 
 #endif /* #else #ifdef CONFIG_RCU_TRACE */
 
+static void rcu_wake_cond(struct task_struct *t, int status)
+{
+	/*
+	 * If the thread is yielding, only wake it when this
+	 * is invoked from idle
+	 */
+	if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
+		wake_up_process(t);
+}
+
 /*
  * Carry out RCU priority boosting on the task indicated by ->exp_tasks
  * or ->boost_tasks, advancing the pointer to the next task in the
@@ -1162,17 +1173,6 @@ static int rcu_boost(struct rcu_node *rnp)
 }
 
 /*
- * Timer handler to initiate waking up of boost kthreads that
- * have yielded the CPU due to excessive numbers of tasks to
- * boost. We wake up the per-rcu_node kthread, which in turn
- * will wake up the booster kthread.
- */
-static void rcu_boost_kthread_timer(unsigned long arg)
-{
-	invoke_rcu_node_kthread((struct rcu_node *)arg);
-}
-
-/*
  * Priority-boosting kthread. One per leaf rcu_node and one for the
  * root rcu_node.
  */
@@ -1195,8 +1195,9 @@ static int rcu_boost_kthread(void *arg)
 		else
 			spincnt = 0;
 		if (spincnt > 10) {
+			rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
 			trace_rcu_utilization("End boost kthread@rcu_yield");
-			rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp);
+			schedule_timeout_interruptible(2);
 			trace_rcu_utilization("Start boost kthread@rcu_yield");
 			spincnt = 0;
 		}
@@ -1234,8 +1235,8 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 			rnp->boost_tasks = rnp->gp_tasks;
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		t = rnp->boost_kthread_task;
-		if (t != NULL)
-			wake_up_process(t);
+		if (t)
+			rcu_wake_cond(t, rnp->boost_kthread_status);
 	} else {
 		rcu_initiate_boost_trace(rnp);
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
@@ -1252,8 +1253,10 @@ static void invoke_rcu_callbacks_kthread(void)
 	local_irq_save(flags);
 	__this_cpu_write(rcu_cpu_has_work, 1);
 	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
-	    current != __this_cpu_read(rcu_cpu_kthread_task))
-		wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
+	    current != __this_cpu_read(rcu_cpu_kthread_task)) {
+		rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
+			      __this_cpu_read(rcu_cpu_kthread_status));
+	}
 	local_irq_restore(flags);
 }
 
@@ -1266,21 +1269,6 @@ static bool rcu_is_callbacks_kthread(void)
 	return __get_cpu_var(rcu_cpu_kthread_task) == current;
 }
 
-/*
- * Set the affinity of the boost kthread. The CPU-hotplug locks are
- * held, so no one should be messing with the existence of the boost
- * kthread.
- */
-static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
-					  cpumask_var_t cm)
-{
-	struct task_struct *t;
-
-	t = rnp->boost_kthread_task;
-	if (t != NULL)
-		set_cpus_allowed_ptr(rnp->boost_kthread_task, cm);
-}
-
 #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
 
 /*
@@ -1297,15 +1285,19 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
  * Returns zero if all is well, a negated errno otherwise.
  */
 static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
-						 struct rcu_node *rnp,
-						 int rnp_index)
+						 struct rcu_node *rnp)
 {
+	int rnp_index = rnp - &rsp->node[0];
 	unsigned long flags;
 	struct sched_param sp;
 	struct task_struct *t;
 
 	if (&rcu_preempt_state != rsp)
 		return 0;
+
+	if (!rcu_scheduler_fully_active || rnp->qsmaskinit == 0)
+		return 0;
+
 	rsp->boost = 1;
 	if (rnp->boost_kthread_task != NULL)
 		return 0;
@@ -1322,25 +1314,6 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 	return 0;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-/*
- * Stop the RCU's per-CPU kthread when its CPU goes offline,.
- */
-static void rcu_stop_cpu_kthread(int cpu)
-{
-	struct task_struct *t;
-
-	/* Stop the CPU's kthread. */
-	t = per_cpu(rcu_cpu_kthread_task, cpu);
-	if (t != NULL) {
-		per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
-		kthread_stop(t);
-	}
-}
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
 static void rcu_kthread_do_work(void)
 {
 	rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
@@ -1348,112 +1321,22 @@ static void rcu_kthread_do_work(void)
 	rcu_preempt_do_callbacks();
 }
 
-/*
- * Wake up the specified per-rcu_node-structure kthread.
- * Because the per-rcu_node kthreads are immortal, we don't need
- * to do anything to keep them alive.
- */
-static void invoke_rcu_node_kthread(struct rcu_node *rnp)
-{
-	struct task_struct *t;
-
-	t = rnp->node_kthread_task;
-	if (t != NULL)
-		wake_up_process(t);
-}
-
-/*
- * Set the specified CPU's kthread to run RT or not, as specified by
- * the to_rt argument. The CPU-hotplug locks are held, so the task
- * is not going away.
- */
-static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
+static void rcu_cpu_kthread_setup(unsigned int cpu)
 {
-	int policy;
 	struct sched_param sp;
-	struct task_struct *t;
-
-	t = per_cpu(rcu_cpu_kthread_task, cpu);
-	if (t == NULL)
-		return;
-	if (to_rt) {
-		policy = SCHED_FIFO;
-		sp.sched_priority = RCU_KTHREAD_PRIO;
-	} else {
-		policy = SCHED_NORMAL;
-		sp.sched_priority = 0;
-	}
-	sched_setscheduler_nocheck(t, policy, &sp);
-}
-
-/*
- * Timer handler to initiate the waking up of per-CPU kthreads that
- * have yielded the CPU due to excess numbers of RCU callbacks.
- * We wake up the per-rcu_node kthread, which in turn will wake up
- * the booster kthread.
- */
-static void rcu_cpu_kthread_timer(unsigned long arg)
-{
-	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
-	struct rcu_node *rnp = rdp->mynode;
 
-	atomic_or(rdp->grpmask, &rnp->wakemask);
-	invoke_rcu_node_kthread(rnp);
+	sp.sched_priority = RCU_KTHREAD_PRIO;
+	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
 }
 
-/*
- * Drop to non-real-time priority and yield, but only after posting a
- * timer that will cause us to regain our real-time priority if we
- * remain preempted. Either way, we restore our real-time priority
- * before returning.
- */
-static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
+static void rcu_cpu_kthread_park(unsigned int cpu)
 {
-	struct sched_param sp;
-	struct timer_list yield_timer;
-	int prio = current->rt_priority;
-
-	setup_timer_on_stack(&yield_timer, f, arg);
-	mod_timer(&yield_timer, jiffies + 2);
-	sp.sched_priority = 0;
-	sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
-	set_user_nice(current, 19);
-	schedule();
-	set_user_nice(current, 0);
-	sp.sched_priority = prio;
-	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
-	del_timer(&yield_timer);
+	per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
 }
 
-/*
- * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
- * This can happen while the corresponding CPU is either coming online
- * or going offline. We cannot wait until the CPU is fully online
- * before starting the kthread, because the various notifier functions
- * can wait for RCU grace periods. So we park rcu_cpu_kthread() until
- * the corresponding CPU is online.
- *
- * Return 1 if the kthread needs to stop, 0 otherwise.
- *
- * Caller must disable bh. This function can momentarily enable it.
- */
-static int rcu_cpu_kthread_should_stop(int cpu)
+static int rcu_cpu_kthread_should_run(unsigned int cpu)
 {
-	while (cpu_is_offline(cpu) ||
-	       !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
-	       smp_processor_id() != cpu) {
-		if (kthread_should_stop())
-			return 1;
-		per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
-		per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
-		local_bh_enable();
-		schedule_timeout_uninterruptible(1);
-		if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
-			set_cpus_allowed_ptr(current, cpumask_of(cpu));
-		local_bh_disable();
-	}
-	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
-	return 0;
+	return __get_cpu_var(rcu_cpu_has_work);
 }
 
 /*
@@ -1461,138 +1344,35 @@ static int rcu_cpu_kthread_should_stop(int cpu)
  * RCU softirq used in flavors and configurations of RCU that do not
  * support RCU priority boosting.
  */
-static int rcu_cpu_kthread(void *arg)
+static void rcu_cpu_kthread(unsigned int cpu)
 {
-	int cpu = (int)(long)arg;
-	unsigned long flags;
-	int spincnt = 0;
-	unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
-	char work;
-	char *workp = &per_cpu(rcu_cpu_has_work, cpu);
+	unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status);
+	char work, *workp = &__get_cpu_var(rcu_cpu_has_work);
+	int spincnt;
 
-	trace_rcu_utilization("Start CPU kthread@init");
-	for (;;) {
-		*statusp = RCU_KTHREAD_WAITING;
-		trace_rcu_utilization("End CPU kthread@rcu_wait");
-		rcu_wait(*workp != 0 || kthread_should_stop());
+	for (spincnt = 0; spincnt < 10; spincnt++) {
 		trace_rcu_utilization("Start CPU kthread@rcu_wait");
 		local_bh_disable();
-		if (rcu_cpu_kthread_should_stop(cpu)) {
-			local_bh_enable();
-			break;
-		}
 		*statusp = RCU_KTHREAD_RUNNING;
-		per_cpu(rcu_cpu_kthread_loops, cpu)++;
-		local_irq_save(flags);
+		this_cpu_inc(rcu_cpu_kthread_loops);
+		local_irq_disable();
 		work = *workp;
 		*workp = 0;
-		local_irq_restore(flags);
+		local_irq_enable();
 		if (work)
 			rcu_kthread_do_work();
 		local_bh_enable();
-		if (*workp != 0)
-			spincnt++;
-		else
-			spincnt = 0;
-		if (spincnt > 10) {
-			*statusp = RCU_KTHREAD_YIELDING;
-			trace_rcu_utilization("End CPU kthread@rcu_yield");
-			rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
-			trace_rcu_utilization("Start CPU kthread@rcu_yield");
-			spincnt = 0;
-		}
-	}
-	*statusp = RCU_KTHREAD_STOPPED;
-	trace_rcu_utilization("End CPU kthread@term");
-	return 0;
-}
-
-/*
- * Spawn a per-CPU kthread, setting up affinity and priority.
- * Because the CPU hotplug lock is held, no other CPU will be attempting
- * to manipulate rcu_cpu_kthread_task. There might be another CPU
- * attempting to access it during boot, but the locking in kthread_bind()
- * will enforce sufficient ordering.
- *
- * Please note that we cannot simply refuse to wake up the per-CPU
- * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
- * which can result in softlockup complaints if the task ends up being
- * idle for more than a couple of minutes.
- *
- * However, please note also that we cannot bind the per-CPU kthread to its
- * CPU until that CPU is fully online. We also cannot wait until the
- * CPU is fully online before we create its per-CPU kthread, as this would
- * deadlock the system when CPU notifiers tried waiting for grace
- * periods. So we bind the per-CPU kthread to its CPU only if the CPU
- * is online. If its CPU is not yet fully online, then the code in
- * rcu_cpu_kthread() will wait until it is fully online, and then do
- * the binding.
- */
-static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
-{
-	struct sched_param sp;
-	struct task_struct *t;
-
-	if (!rcu_scheduler_fully_active ||
-	    per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
-		return 0;
-	t = kthread_create_on_node(rcu_cpu_kthread,
-				   (void *)(long)cpu,
-				   cpu_to_node(cpu),
-				   "rcuc/%d", cpu);
-	if (IS_ERR(t))
-		return PTR_ERR(t);
-	if (cpu_online(cpu))
-		kthread_bind(t, cpu);
-	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
-	WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
-	sp.sched_priority = RCU_KTHREAD_PRIO;
-	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-	per_cpu(rcu_cpu_kthread_task, cpu) = t;
-	wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
-	return 0;
-}
-
-/*
- * Per-rcu_node kthread, which is in charge of waking up the per-CPU
- * kthreads when needed. We ignore requests to wake up kthreads
- * for offline CPUs, which is OK because force_quiescent_state()
- * takes care of this case.
- */
-static int rcu_node_kthread(void *arg)
-{
-	int cpu;
-	unsigned long flags;
-	unsigned long mask;
-	struct rcu_node *rnp = (struct rcu_node *)arg;
-	struct sched_param sp;
-	struct task_struct *t;
-
-	for (;;) {
-		rnp->node_kthread_status = RCU_KTHREAD_WAITING;
-		rcu_wait(atomic_read(&rnp->wakemask) != 0);
-		rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
-		raw_spin_lock_irqsave(&rnp->lock, flags);
-		mask = atomic_xchg(&rnp->wakemask, 0);
-		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
-		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
-			if ((mask & 0x1) == 0)
-				continue;
-			preempt_disable();
-			t = per_cpu(rcu_cpu_kthread_task, cpu);
-			if (!cpu_online(cpu) || t == NULL) {
-				preempt_enable();
-				continue;
-			}
-			per_cpu(rcu_cpu_has_work, cpu) = 1;
-			sp.sched_priority = RCU_KTHREAD_PRIO;
-			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-			preempt_enable();
+		if (*workp == 0) {
+			trace_rcu_utilization("End CPU kthread@rcu_wait");
+			*statusp = RCU_KTHREAD_WAITING;
+			return;
 		}
 	}
-	/* NOTREACHED */
-	rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
-	return 0;
+	*statusp = RCU_KTHREAD_YIELDING;
+	trace_rcu_utilization("Start CPU kthread@rcu_yield");
+	schedule_timeout_interruptible(2);
+	trace_rcu_utilization("End CPU kthread@rcu_yield");
+	*statusp = RCU_KTHREAD_WAITING;
 }
 
 /*
@@ -1604,17 +1384,17 @@ static int rcu_node_kthread(void *arg)
  * no outgoing CPU. If there are no CPUs left in the affinity set,
  * this function allows the kthread to execute on any CPU.
  */
-static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 {
+	struct task_struct *t = rnp->boost_kthread_task;
+	unsigned long mask = rnp->qsmaskinit;
 	cpumask_var_t cm;
 	int cpu;
-	unsigned long mask = rnp->qsmaskinit;
 
-	if (rnp->node_kthread_task == NULL)
+	if (!t)
 		return;
-	if (!alloc_cpumask_var(&cm, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
 		return;
-	cpumask_clear(cm);
 	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
 		if ((mask & 0x1) && cpu != outgoingcpu)
 			cpumask_set_cpu(cpu, cm);
@@ -1624,62 +1404,36 @@ static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 			cpumask_clear_cpu(cpu, cm);
 		WARN_ON_ONCE(cpumask_weight(cm) == 0);
 	}
-	set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
-	rcu_boost_kthread_setaffinity(rnp, cm);
+	set_cpus_allowed_ptr(t, cm);
 	free_cpumask_var(cm);
 }
 
-/*
- * Spawn a per-rcu_node kthread, setting priority and affinity.
- * Called during boot before online/offline can happen, or, if
- * during runtime, with the main CPU-hotplug locks held. So only
- * one of these can be executing at a time.
- */
-static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
-						struct rcu_node *rnp)
-{
-	unsigned long flags;
-	int rnp_index = rnp - &rsp->node[0];
-	struct sched_param sp;
-	struct task_struct *t;
-
-	if (!rcu_scheduler_fully_active ||
-	    rnp->qsmaskinit == 0)
-		return 0;
-	if (rnp->node_kthread_task == NULL) {
-		t = kthread_create(rcu_node_kthread, (void *)rnp,
-				   "rcun/%d", rnp_index);
-		if (IS_ERR(t))
-			return PTR_ERR(t);
-		raw_spin_lock_irqsave(&rnp->lock, flags);
-		rnp->node_kthread_task = t;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
-		sp.sched_priority = 99;
-		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-		wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
-	}
-	return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
-}
+static struct smp_hotplug_thread rcu_cpu_thread_spec = {
+	.store			= &rcu_cpu_kthread_task,
+	.thread_should_run	= rcu_cpu_kthread_should_run,
+	.thread_fn		= rcu_cpu_kthread,
+	.thread_comm		= "rcuc/%u",
+	.setup			= rcu_cpu_kthread_setup,
+	.park			= rcu_cpu_kthread_park,
+};
 
 /*
  * Spawn all kthreads -- called as soon as the scheduler is running.
  */
 static int __init rcu_spawn_kthreads(void)
 {
-	int cpu;
 	struct rcu_node *rnp;
+	int cpu;
 
 	rcu_scheduler_fully_active = 1;
-	for_each_possible_cpu(cpu) {
+	for_each_possible_cpu(cpu)
 		per_cpu(rcu_cpu_has_work, cpu) = 0;
-		if (cpu_online(cpu))
-			(void)rcu_spawn_one_cpu_kthread(cpu);
-	}
+	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
 	rnp = rcu_get_root(rcu_state);
-	(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+	(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
 	if (NUM_RCU_NODES > 1) {
 		rcu_for_each_leaf_node(rcu_state, rnp)
-			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+			(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
 	}
 	return 0;
 }
@@ -1691,11 +1445,8 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
 	struct rcu_node *rnp = rdp->mynode;
 
 	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
-	if (rcu_scheduler_fully_active) {
-		(void)rcu_spawn_one_cpu_kthread(cpu);
-		if (rnp->node_kthread_task == NULL)
-			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
-	}
+	if (rcu_scheduler_fully_active)
+		(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
 }
 
 #else /* #ifdef CONFIG_RCU_BOOST */
@@ -1719,19 +1470,7 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
 {
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-static void rcu_stop_cpu_kthread(int cpu)
-{
-}
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
-static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
-{
-}
-
-static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
+static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 {
 }
 