path: root/kernel/rcutree_plugin.h
author		Paul E. McKenney <paul.mckenney@linaro.org>	2011-06-16 11:26:32 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2011-06-16 19:12:05 -0400
commit		f8b7fc6b514f34a51875dd48dff70d4d17a54f38 (patch)
tree		dc6583851e3536516761b0fd9b07f5a3a1743ebb /kernel/rcutree_plugin.h
parent		a46e0899eec7a3069bcadd45dfba7bf67c6ed016 (diff)
rcu: Move RCU_BOOST #ifdefs to header file
The commit "use softirq instead of kthreads except when RCU_BOOST=y" just applied #ifdef in place. This commit is a cleanup that moves the newly #ifdef'ed code to the header file kernel/rcutree_plugin.h. Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org> Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--	kernel/rcutree_plugin.h	384
1 file changed, 384 insertions(+), 0 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 2772386c0421..14dc7dd00902 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1330,6 +1330,370 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 	return 0;
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * Stop the RCU's per-CPU kthread when its CPU goes offline,.
+ */
+static void rcu_stop_cpu_kthread(int cpu)
+{
+	struct task_struct *t;
+
+	/* Stop the CPU's kthread. */
+	t = per_cpu(rcu_cpu_kthread_task, cpu);
+	if (t != NULL) {
+		per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
+		kthread_stop(t);
+	}
+}
+
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+
+static void rcu_kthread_do_work(void)
+{
+	rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
+	rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
+	rcu_preempt_do_callbacks();
+}
+
+/*
+ * Wake up the specified per-rcu_node-structure kthread.
+ * Because the per-rcu_node kthreads are immortal, we don't need
+ * to do anything to keep them alive.
+ */
+static void invoke_rcu_node_kthread(struct rcu_node *rnp)
+{
+	struct task_struct *t;
+
+	t = rnp->node_kthread_task;
+	if (t != NULL)
+		wake_up_process(t);
+}
+
+/*
+ * Set the specified CPU's kthread to run RT or not, as specified by
+ * the to_rt argument. The CPU-hotplug locks are held, so the task
+ * is not going away.
+ */
+static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
+{
+	int policy;
+	struct sched_param sp;
+	struct task_struct *t;
+
+	t = per_cpu(rcu_cpu_kthread_task, cpu);
+	if (t == NULL)
+		return;
+	if (to_rt) {
+		policy = SCHED_FIFO;
+		sp.sched_priority = RCU_KTHREAD_PRIO;
+	} else {
+		policy = SCHED_NORMAL;
+		sp.sched_priority = 0;
+	}
+	sched_setscheduler_nocheck(t, policy, &sp);
+}
+
+/*
+ * Timer handler to initiate the waking up of per-CPU kthreads that
+ * have yielded the CPU due to excess numbers of RCU callbacks.
+ * We wake up the per-rcu_node kthread, which in turn will wake up
+ * the booster kthread.
+ */
+static void rcu_cpu_kthread_timer(unsigned long arg)
+{
+	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
+	struct rcu_node *rnp = rdp->mynode;
+
+	atomic_or(rdp->grpmask, &rnp->wakemask);
+	invoke_rcu_node_kthread(rnp);
+}
+
+/*
+ * Drop to non-real-time priority and yield, but only after posting a
+ * timer that will cause us to regain our real-time priority if we
+ * remain preempted. Either way, we restore our real-time priority
+ * before returning.
+ */
+static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
+{
+	struct sched_param sp;
+	struct timer_list yield_timer;
+
+	setup_timer_on_stack(&yield_timer, f, arg);
+	mod_timer(&yield_timer, jiffies + 2);
+	sp.sched_priority = 0;
+	sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
+	set_user_nice(current, 19);
+	schedule();
+	sp.sched_priority = RCU_KTHREAD_PRIO;
+	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
+	del_timer(&yield_timer);
+}
+
+/*
+ * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
+ * This can happen while the corresponding CPU is either coming online
+ * or going offline. We cannot wait until the CPU is fully online
+ * before starting the kthread, because the various notifier functions
+ * can wait for RCU grace periods. So we park rcu_cpu_kthread() until
+ * the corresponding CPU is online.
+ *
+ * Return 1 if the kthread needs to stop, 0 otherwise.
+ *
+ * Caller must disable bh. This function can momentarily enable it.
+ */
+static int rcu_cpu_kthread_should_stop(int cpu)
+{
+	while (cpu_is_offline(cpu) ||
+	       !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
+	       smp_processor_id() != cpu) {
+		if (kthread_should_stop())
+			return 1;
+		per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
+		per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
+		local_bh_enable();
+		schedule_timeout_uninterruptible(1);
+		if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
+			set_cpus_allowed_ptr(current, cpumask_of(cpu));
+		local_bh_disable();
+	}
+	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
+	return 0;
+}
+
+/*
+ * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
+ * earlier RCU softirq.
+ */
+static int rcu_cpu_kthread(void *arg)
+{
+	int cpu = (int)(long)arg;
+	unsigned long flags;
+	int spincnt = 0;
+	unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
+	char work;
+	char *workp = &per_cpu(rcu_cpu_has_work, cpu);
+
+	for (;;) {
+		*statusp = RCU_KTHREAD_WAITING;
+		rcu_wait(*workp != 0 || kthread_should_stop());
+		local_bh_disable();
+		if (rcu_cpu_kthread_should_stop(cpu)) {
+			local_bh_enable();
+			break;
+		}
+		*statusp = RCU_KTHREAD_RUNNING;
+		per_cpu(rcu_cpu_kthread_loops, cpu)++;
+		local_irq_save(flags);
+		work = *workp;
+		*workp = 0;
+		local_irq_restore(flags);
+		if (work)
+			rcu_kthread_do_work();
+		local_bh_enable();
+		if (*workp != 0)
+			spincnt++;
+		else
+			spincnt = 0;
+		if (spincnt > 10) {
+			*statusp = RCU_KTHREAD_YIELDING;
+			rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
+			spincnt = 0;
+		}
+	}
+	*statusp = RCU_KTHREAD_STOPPED;
+	return 0;
+}
+
+/*
+ * Spawn a per-CPU kthread, setting up affinity and priority.
+ * Because the CPU hotplug lock is held, no other CPU will be attempting
+ * to manipulate rcu_cpu_kthread_task. There might be another CPU
+ * attempting to access it during boot, but the locking in kthread_bind()
+ * will enforce sufficient ordering.
+ *
+ * Please note that we cannot simply refuse to wake up the per-CPU
+ * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
+ * which can result in softlockup complaints if the task ends up being
+ * idle for more than a couple of minutes.
+ *
+ * However, please note also that we cannot bind the per-CPU kthread to its
+ * CPU until that CPU is fully online. We also cannot wait until the
+ * CPU is fully online before we create its per-CPU kthread, as this would
+ * deadlock the system when CPU notifiers tried waiting for grace
+ * periods. So we bind the per-CPU kthread to its CPU only if the CPU
+ * is online. If its CPU is not yet fully online, then the code in
+ * rcu_cpu_kthread() will wait until it is fully online, and then do
+ * the binding.
+ */
+static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
+{
+	struct sched_param sp;
+	struct task_struct *t;
+
+	if (!rcu_kthreads_spawnable ||
+	    per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
+		return 0;
+	t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
+	if (IS_ERR(t))
+		return PTR_ERR(t);
+	if (cpu_online(cpu))
+		kthread_bind(t, cpu);
+	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
+	WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
+	sp.sched_priority = RCU_KTHREAD_PRIO;
+	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+	per_cpu(rcu_cpu_kthread_task, cpu) = t;
+	wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
+	return 0;
+}
+
+/*
+ * Per-rcu_node kthread, which is in charge of waking up the per-CPU
+ * kthreads when needed. We ignore requests to wake up kthreads
+ * for offline CPUs, which is OK because force_quiescent_state()
+ * takes care of this case.
+ */
+static int rcu_node_kthread(void *arg)
+{
+	int cpu;
+	unsigned long flags;
+	unsigned long mask;
+	struct rcu_node *rnp = (struct rcu_node *)arg;
+	struct sched_param sp;
+	struct task_struct *t;
+
+	for (;;) {
+		rnp->node_kthread_status = RCU_KTHREAD_WAITING;
+		rcu_wait(atomic_read(&rnp->wakemask) != 0);
+		rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
+		raw_spin_lock_irqsave(&rnp->lock, flags);
+		mask = atomic_xchg(&rnp->wakemask, 0);
+		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
+		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
+			if ((mask & 0x1) == 0)
+				continue;
+			preempt_disable();
+			t = per_cpu(rcu_cpu_kthread_task, cpu);
+			if (!cpu_online(cpu) || t == NULL) {
+				preempt_enable();
+				continue;
+			}
+			per_cpu(rcu_cpu_has_work, cpu) = 1;
+			sp.sched_priority = RCU_KTHREAD_PRIO;
+			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+			preempt_enable();
+		}
+	}
+	/* NOTREACHED */
+	rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
+	return 0;
+}
+
+/*
+ * Set the per-rcu_node kthread's affinity to cover all CPUs that are
+ * served by the rcu_node in question. The CPU hotplug lock is still
+ * held, so the value of rnp->qsmaskinit will be stable.
+ *
+ * We don't include outgoingcpu in the affinity set, use -1 if there is
+ * no outgoing CPU. If there are no CPUs left in the affinity set,
+ * this function allows the kthread to execute on any CPU.
+ */
+static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+{
+	cpumask_var_t cm;
+	int cpu;
+	unsigned long mask = rnp->qsmaskinit;
+
+	if (rnp->node_kthread_task == NULL)
+		return;
+	if (!alloc_cpumask_var(&cm, GFP_KERNEL))
+		return;
+	cpumask_clear(cm);
+	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
+		if ((mask & 0x1) && cpu != outgoingcpu)
+			cpumask_set_cpu(cpu, cm);
+	if (cpumask_weight(cm) == 0) {
+		cpumask_setall(cm);
+		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
+			cpumask_clear_cpu(cpu, cm);
+		WARN_ON_ONCE(cpumask_weight(cm) == 0);
+	}
+	set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
+	rcu_boost_kthread_setaffinity(rnp, cm);
+	free_cpumask_var(cm);
+}
+
+/*
+ * Spawn a per-rcu_node kthread, setting priority and affinity.
+ * Called during boot before online/offline can happen, or, if
+ * during runtime, with the main CPU-hotplug locks held. So only
+ * one of these can be executing at a time.
+ */
+static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
+						struct rcu_node *rnp)
+{
+	unsigned long flags;
+	int rnp_index = rnp - &rsp->node[0];
+	struct sched_param sp;
+	struct task_struct *t;
+
+	if (!rcu_kthreads_spawnable ||
+	    rnp->qsmaskinit == 0)
+		return 0;
+	if (rnp->node_kthread_task == NULL) {
+		t = kthread_create(rcu_node_kthread, (void *)rnp,
+				   "rcun%d", rnp_index);
+		if (IS_ERR(t))
+			return PTR_ERR(t);
+		raw_spin_lock_irqsave(&rnp->lock, flags);
+		rnp->node_kthread_task = t;
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		sp.sched_priority = 99;
+		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+		wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
+	}
+	return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
+}
+
+/*
+ * Spawn all kthreads -- called as soon as the scheduler is running.
+ */
+static int __init rcu_spawn_kthreads(void)
+{
+	int cpu;
+	struct rcu_node *rnp;
+
+	rcu_kthreads_spawnable = 1;
+	for_each_possible_cpu(cpu) {
+		per_cpu(rcu_cpu_has_work, cpu) = 0;
+		if (cpu_online(cpu))
+			(void)rcu_spawn_one_cpu_kthread(cpu);
+	}
+	rnp = rcu_get_root(rcu_state);
+	(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+	if (NUM_RCU_NODES > 1) {
+		rcu_for_each_leaf_node(rcu_state, rnp)
+			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+	}
+	return 0;
+}
+early_initcall(rcu_spawn_kthreads);
+
+static void __cpuinit rcu_prepare_kthreads(int cpu)
+{
+	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
+	struct rcu_node *rnp = rdp->mynode;
+
+	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
+	if (rcu_kthreads_spawnable) {
+		(void)rcu_spawn_one_cpu_kthread(cpu);
+		if (rnp->node_kthread_task == NULL)
+			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+	}
+}
+
 #else /* #ifdef CONFIG_RCU_BOOST */
 
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
@@ -1346,6 +1710,26 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
 {
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+
+static void rcu_stop_cpu_kthread(int cpu)
+{
+}
+
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+
+static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+{
+}
+
+static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
+{
+}
+
+static void __cpuinit rcu_prepare_kthreads(int cpu)
+{
+}
+
 #endif /* #else #ifdef CONFIG_RCU_BOOST */
 
 #ifndef CONFIG_SMP
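
A note for readers of rcu_cpu_kthread() and rcu_node_kthread() in the hunks above: both block in rcu_wait(), which is defined elsewhere in the RCU tree code rather than in this patch. As a rough, hedged sketch (an approximation of its general shape, not the verbatim definition), it is a condition-polling sleep loop along these lines:

	#define rcu_wait(cond)							\
	do {									\
		for (;;) {							\
			/* Sleep until someone wakes us and the condition holds. */ \
			set_current_state(TASK_INTERRUPTIBLE);			\
			if (cond)						\
				break;						\
			schedule();						\
		}								\
		__set_current_state(TASK_RUNNING);				\
	} while (0)

This is why the per-rcu_node kthread only needs wake_up_process() in invoke_rcu_node_kthread(): the woken task re-checks its condition and goes back to sleep if there is no work.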