 kernel/rcutree.c        | 383
 kernel/rcutree.h        |   5
 kernel/rcutree_plugin.h | 384
 3 files changed, 390 insertions(+), 382 deletions(-)
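
This patch is almost pure code motion: the CONFIG_RCU_BOOST kthread machinery (the per-CPU kthreads, the per-rcu_node kthreads, and their spawning, affinity, and priority helpers) moves verbatim from kernel/rcutree.c to kernel/rcutree_plugin.h, where the rest of the boost-specific code already lives. The one functional seam is a new rcu_stop_cpu_kthread() helper, which hides the kthread_stop() logic that __rcu_offline_cpu() previously open-coded behind an #ifdef. The small insertion surplus (390 added versus 382 removed) is that helper, its empty stubs, and the declarations added to rcutree.h.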
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 429d4949f0eb..7e59ffb3d0ba 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1093,16 +1093,8 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
         int need_report = 0;
         struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
         struct rcu_node *rnp;
-#ifdef CONFIG_RCU_BOOST
-        struct task_struct *t;
 
-        /* Stop the CPU's kthread. */
-        t = per_cpu(rcu_cpu_kthread_task, cpu);
-        if (t != NULL) {
-                per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
-                kthread_stop(t);
-        }
-#endif /* #ifdef CONFIG_RCU_BOOST */
+        rcu_stop_cpu_kthread(cpu);
 
         /* Exclude any attempts to start a new grace period. */
         raw_spin_lock_irqsave(&rsp->onofflock, flags);
@@ -1453,17 +1445,6 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
         invoke_rcu_callbacks(rsp, rdp);
 }
 
-#ifdef CONFIG_RCU_BOOST
-
-static void rcu_kthread_do_work(void)
-{
-        rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
-        rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
-        rcu_preempt_do_callbacks();
-}
-
-#endif /* #ifdef CONFIG_RCU_BOOST */
-
 /*
  * Do softirq processing for the current CPU.
  */
@@ -1498,345 +1479,6 @@ static void invoke_rcu_core(void)
         raise_softirq(RCU_SOFTIRQ);
 }
 
-#ifdef CONFIG_RCU_BOOST
-
-/*
- * Wake up the specified per-rcu_node-structure kthread.
- * Because the per-rcu_node kthreads are immortal, we don't need
- * to do anything to keep them alive.
- */
-static void invoke_rcu_node_kthread(struct rcu_node *rnp)
-{
-        struct task_struct *t;
-
-        t = rnp->node_kthread_task;
-        if (t != NULL)
-                wake_up_process(t);
-}
-
-/*
- * Set the specified CPU's kthread to run RT or not, as specified by
- * the to_rt argument. The CPU-hotplug locks are held, so the task
- * is not going away.
- */
-static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
-{
-        int policy;
-        struct sched_param sp;
-        struct task_struct *t;
-
-        t = per_cpu(rcu_cpu_kthread_task, cpu);
-        if (t == NULL)
-                return;
-        if (to_rt) {
-                policy = SCHED_FIFO;
-                sp.sched_priority = RCU_KTHREAD_PRIO;
-        } else {
-                policy = SCHED_NORMAL;
-                sp.sched_priority = 0;
-        }
-        sched_setscheduler_nocheck(t, policy, &sp);
-}
-
-/*
- * Timer handler to initiate the waking up of per-CPU kthreads that
- * have yielded the CPU due to excess numbers of RCU callbacks.
- * We wake up the per-rcu_node kthread, which in turn will wake up
- * the booster kthread.
- */
-static void rcu_cpu_kthread_timer(unsigned long arg)
-{
-        struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
-        struct rcu_node *rnp = rdp->mynode;
-
-        atomic_or(rdp->grpmask, &rnp->wakemask);
-        invoke_rcu_node_kthread(rnp);
-}
-
-/*
- * Drop to non-real-time priority and yield, but only after posting a
- * timer that will cause us to regain our real-time priority if we
- * remain preempted. Either way, we restore our real-time priority
- * before returning.
- */
-static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
-{
-        struct sched_param sp;
-        struct timer_list yield_timer;
-
-        setup_timer_on_stack(&yield_timer, f, arg);
-        mod_timer(&yield_timer, jiffies + 2);
-        sp.sched_priority = 0;
-        sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
-        set_user_nice(current, 19);
-        schedule();
-        sp.sched_priority = RCU_KTHREAD_PRIO;
-        sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
-        del_timer(&yield_timer);
-}
-
-/*
- * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
- * This can happen while the corresponding CPU is either coming online
- * or going offline. We cannot wait until the CPU is fully online
- * before starting the kthread, because the various notifier functions
- * can wait for RCU grace periods. So we park rcu_cpu_kthread() until
- * the corresponding CPU is online.
- *
- * Return 1 if the kthread needs to stop, 0 otherwise.
- *
- * Caller must disable bh. This function can momentarily enable it.
- */
-static int rcu_cpu_kthread_should_stop(int cpu)
-{
-        while (cpu_is_offline(cpu) ||
-               !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
-               smp_processor_id() != cpu) {
-                if (kthread_should_stop())
-                        return 1;
-                per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
-                per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
-                local_bh_enable();
-                schedule_timeout_uninterruptible(1);
-                if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
-                        set_cpus_allowed_ptr(current, cpumask_of(cpu));
-                local_bh_disable();
-        }
-        per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
-        return 0;
-}
-
-/*
- * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
- * earlier RCU softirq.
- */
-static int rcu_cpu_kthread(void *arg)
-{
-        int cpu = (int)(long)arg;
-        unsigned long flags;
-        int spincnt = 0;
-        unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
-        char work;
-        char *workp = &per_cpu(rcu_cpu_has_work, cpu);
-
-        for (;;) {
-                *statusp = RCU_KTHREAD_WAITING;
-                rcu_wait(*workp != 0 || kthread_should_stop());
-                local_bh_disable();
-                if (rcu_cpu_kthread_should_stop(cpu)) {
-                        local_bh_enable();
-                        break;
-                }
-                *statusp = RCU_KTHREAD_RUNNING;
-                per_cpu(rcu_cpu_kthread_loops, cpu)++;
-                local_irq_save(flags);
-                work = *workp;
-                *workp = 0;
-                local_irq_restore(flags);
-                if (work)
-                        rcu_kthread_do_work();
-                local_bh_enable();
-                if (*workp != 0)
-                        spincnt++;
-                else
-                        spincnt = 0;
-                if (spincnt > 10) {
-                        *statusp = RCU_KTHREAD_YIELDING;
-                        rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
-                        spincnt = 0;
-                }
-        }
-        *statusp = RCU_KTHREAD_STOPPED;
-        return 0;
-}
-
-/*
- * Spawn a per-CPU kthread, setting up affinity and priority.
- * Because the CPU hotplug lock is held, no other CPU will be attempting
- * to manipulate rcu_cpu_kthread_task. There might be another CPU
- * attempting to access it during boot, but the locking in kthread_bind()
- * will enforce sufficient ordering.
- *
- * Please note that we cannot simply refuse to wake up the per-CPU
- * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
- * which can result in softlockup complaints if the task ends up being
- * idle for more than a couple of minutes.
- *
- * However, please note also that we cannot bind the per-CPU kthread to its
- * CPU until that CPU is fully online. We also cannot wait until the
- * CPU is fully online before we create its per-CPU kthread, as this would
- * deadlock the system when CPU notifiers tried waiting for grace
- * periods. So we bind the per-CPU kthread to its CPU only if the CPU
- * is online. If its CPU is not yet fully online, then the code in
- * rcu_cpu_kthread() will wait until it is fully online, and then do
- * the binding.
- */
-static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
-{
-        struct sched_param sp;
-        struct task_struct *t;
-
-        if (!rcu_kthreads_spawnable ||
-            per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
-                return 0;
-        t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
-        if (IS_ERR(t))
-                return PTR_ERR(t);
-        if (cpu_online(cpu))
-                kthread_bind(t, cpu);
-        per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
-        WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
-        sp.sched_priority = RCU_KTHREAD_PRIO;
-        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-        per_cpu(rcu_cpu_kthread_task, cpu) = t;
-        wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
-        return 0;
-}
-
-/*
- * Per-rcu_node kthread, which is in charge of waking up the per-CPU
- * kthreads when needed. We ignore requests to wake up kthreads
- * for offline CPUs, which is OK because force_quiescent_state()
- * takes care of this case.
- */
-static int rcu_node_kthread(void *arg)
-{
-        int cpu;
-        unsigned long flags;
-        unsigned long mask;
-        struct rcu_node *rnp = (struct rcu_node *)arg;
-        struct sched_param sp;
-        struct task_struct *t;
-
-        for (;;) {
-                rnp->node_kthread_status = RCU_KTHREAD_WAITING;
-                rcu_wait(atomic_read(&rnp->wakemask) != 0);
-                rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
-                raw_spin_lock_irqsave(&rnp->lock, flags);
-                mask = atomic_xchg(&rnp->wakemask, 0);
-                rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
-                for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
-                        if ((mask & 0x1) == 0)
-                                continue;
-                        preempt_disable();
-                        t = per_cpu(rcu_cpu_kthread_task, cpu);
-                        if (!cpu_online(cpu) || t == NULL) {
-                                preempt_enable();
-                                continue;
-                        }
-                        per_cpu(rcu_cpu_has_work, cpu) = 1;
-                        sp.sched_priority = RCU_KTHREAD_PRIO;
-                        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-                        preempt_enable();
-                }
-        }
-        /* NOTREACHED */
-        rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
-        return 0;
-}
-
-/*
- * Set the per-rcu_node kthread's affinity to cover all CPUs that are
- * served by the rcu_node in question. The CPU hotplug lock is still
- * held, so the value of rnp->qsmaskinit will be stable.
- *
- * We don't include outgoingcpu in the affinity set, use -1 if there is
- * no outgoing CPU. If there are no CPUs left in the affinity set,
- * this function allows the kthread to execute on any CPU.
- */
-static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
-{
-        cpumask_var_t cm;
-        int cpu;
-        unsigned long mask = rnp->qsmaskinit;
-
-        if (rnp->node_kthread_task == NULL)
-                return;
-        if (!alloc_cpumask_var(&cm, GFP_KERNEL))
-                return;
-        cpumask_clear(cm);
-        for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
-                if ((mask & 0x1) && cpu != outgoingcpu)
-                        cpumask_set_cpu(cpu, cm);
-        if (cpumask_weight(cm) == 0) {
-                cpumask_setall(cm);
-                for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
-                        cpumask_clear_cpu(cpu, cm);
-                WARN_ON_ONCE(cpumask_weight(cm) == 0);
-        }
-        set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
-        rcu_boost_kthread_setaffinity(rnp, cm);
-        free_cpumask_var(cm);
-}
-
-/*
- * Spawn a per-rcu_node kthread, setting priority and affinity.
- * Called during boot before online/offline can happen, or, if
- * during runtime, with the main CPU-hotplug locks held. So only
- * one of these can be executing at a time.
- */
-static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
-                                                struct rcu_node *rnp)
-{
-        unsigned long flags;
-        int rnp_index = rnp - &rsp->node[0];
-        struct sched_param sp;
-        struct task_struct *t;
-
-        if (!rcu_kthreads_spawnable ||
-            rnp->qsmaskinit == 0)
-                return 0;
-        if (rnp->node_kthread_task == NULL) {
-                t = kthread_create(rcu_node_kthread, (void *)rnp,
-                                   "rcun%d", rnp_index);
-                if (IS_ERR(t))
-                        return PTR_ERR(t);
-                raw_spin_lock_irqsave(&rnp->lock, flags);
-                rnp->node_kthread_task = t;
-                raw_spin_unlock_irqrestore(&rnp->lock, flags);
-                sp.sched_priority = 99;
-                sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-                wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
-        }
-        return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
-}
-
-/*
- * Spawn all kthreads -- called as soon as the scheduler is running.
- */
-static int __init rcu_spawn_kthreads(void)
-{
-        int cpu;
-        struct rcu_node *rnp;
-
-        rcu_kthreads_spawnable = 1;
-        for_each_possible_cpu(cpu) {
-                per_cpu(rcu_cpu_has_work, cpu) = 0;
-                if (cpu_online(cpu))
-                        (void)rcu_spawn_one_cpu_kthread(cpu);
-        }
-        rnp = rcu_get_root(rcu_state);
-        (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
-        if (NUM_RCU_NODES > 1) {
-                rcu_for_each_leaf_node(rcu_state, rnp)
-                        (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
-        }
-        return 0;
-}
-early_initcall(rcu_spawn_kthreads);
-
-#else /* #ifdef CONFIG_RCU_BOOST */
-
-static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
-{
-}
-
-static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
-{
-}
-
-#endif /* #else #ifdef CONFIG_RCU_BOOST */
-
 static void
 __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
            struct rcu_state *rsp)
@@ -2243,29 +1885,6 @@ static void __cpuinit rcu_prepare_cpu(int cpu)
         rcu_preempt_init_percpu_data(cpu);
 }
 
-#ifdef CONFIG_RCU_BOOST
-
-static void __cpuinit rcu_prepare_kthreads(int cpu)
-{
-        struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
-        struct rcu_node *rnp = rdp->mynode;
-
-        /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
-        if (rcu_kthreads_spawnable) {
-                (void)rcu_spawn_one_cpu_kthread(cpu);
-                if (rnp->node_kthread_task == NULL)
-                        (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
-        }
-}
-
-#else /* #ifdef CONFIG_RCU_BOOST */
-
-static void __cpuinit rcu_prepare_kthreads(int cpu)
-{
-}
-
-#endif /* #else #ifdef CONFIG_RCU_BOOST */
-
 /*
  * Handle CPU online/offline notification events.
  */
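
With the helper in place, __rcu_offline_cpu() no longer needs any #ifdef: it calls rcu_stop_cpu_kthread() unconditionally and the plugin header decides what that means. A condensed sketch of the pattern (names are from this patch; the patch additionally wraps both definitions in CONFIG_HOTPLUG_CPU, omitted here for brevity):

#ifdef CONFIG_RCU_BOOST
/* Real version: detach and then stop the outgoing CPU's kthread. */
static void rcu_stop_cpu_kthread(int cpu)
{
        struct task_struct *t = per_cpu(rcu_cpu_kthread_task, cpu);

        if (t != NULL) {
                per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
                kthread_stop(t);
        }
}
#else
/* Stub: no RCU kthreads exist without RCU_BOOST, so nothing to stop. */
static void rcu_stop_cpu_kthread(int cpu)
{
}
#endif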
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 434288c7ad88..01b2ccda26fb 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -427,6 +427,7 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
                                       unsigned long flags);
+static void rcu_stop_cpu_kthread(int cpu);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static void rcu_print_detail_task_stall(struct rcu_state *rsp);
 static void rcu_print_task_stall(struct rcu_node *rnp);
@@ -460,6 +461,10 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
 static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
                                                  struct rcu_node *rnp,
                                                  int rnp_index);
+static void invoke_rcu_node_kthread(struct rcu_node *rnp);
+static void rcu_yield(void (*f)(unsigned long), unsigned long arg);
 #endif /* #ifdef CONFIG_RCU_BOOST */
+static void rcu_cpu_kthread_setrt(int cpu, int to_rt);
+static void __cpuinit rcu_prepare_kthreads(int cpu);
 
 #endif /* #ifndef RCU_TREE_NONCORE */
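
Note that rcu_cpu_kthread_setrt() and rcu_prepare_kthreads() are declared outside the #ifdef CONFIG_RCU_BOOST block: they get stubs when boosting is off, so the CPU-hotplug notifier in rcutree.c can call them unconditionally. For orientation only (this function is not part of the patch), the notifier in kernels of this vintage looks roughly like the following sketch, with the remaining cases abbreviated:

static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
                                    unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;
        struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
        struct rcu_node *rnp = rdp->mynode;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                rcu_prepare_cpu(cpu);
                rcu_prepare_kthreads(cpu);
                break;
        case CPU_ONLINE:
        case CPU_DOWN_FAILED:
                rcu_node_kthread_setaffinity(rnp, -1);
                rcu_cpu_kthread_setrt(cpu, 1);
                break;
        case CPU_DOWN_PREPARE:
                rcu_node_kthread_setaffinity(rnp, cpu);
                rcu_cpu_kthread_setrt(cpu, 0);
                break;
        /* CPU_DYING, CPU_DEAD, etc. are handled as before. */
        default:
                break;
        }
        return NOTIFY_OK;
}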
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 2772386c0421..14dc7dd00902 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1330,6 +1330,370 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
         return 0;
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * Stop RCU's per-CPU kthread when its CPU goes offline.
+ */
+static void rcu_stop_cpu_kthread(int cpu)
+{
+        struct task_struct *t;
+
+        /* Stop the CPU's kthread. */
+        t = per_cpu(rcu_cpu_kthread_task, cpu);
+        if (t != NULL) {
+                per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
+                kthread_stop(t);
+        }
+}
+
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+
+static void rcu_kthread_do_work(void)
+{
+        rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
+        rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
+        rcu_preempt_do_callbacks();
+}
+
+/*
+ * Wake up the specified per-rcu_node-structure kthread.
+ * Because the per-rcu_node kthreads are immortal, we don't need
+ * to do anything to keep them alive.
+ */
+static void invoke_rcu_node_kthread(struct rcu_node *rnp)
+{
+        struct task_struct *t;
+
+        t = rnp->node_kthread_task;
+        if (t != NULL)
+                wake_up_process(t);
+}
+
+/*
+ * Set the specified CPU's kthread to run RT or not, as specified by
+ * the to_rt argument. The CPU-hotplug locks are held, so the task
+ * is not going away.
+ */
+static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
+{
+        int policy;
+        struct sched_param sp;
+        struct task_struct *t;
+
+        t = per_cpu(rcu_cpu_kthread_task, cpu);
+        if (t == NULL)
+                return;
+        if (to_rt) {
+                policy = SCHED_FIFO;
+                sp.sched_priority = RCU_KTHREAD_PRIO;
+        } else {
+                policy = SCHED_NORMAL;
+                sp.sched_priority = 0;
+        }
+        sched_setscheduler_nocheck(t, policy, &sp);
+}
+
+/*
+ * Timer handler to initiate the waking up of per-CPU kthreads that
+ * have yielded the CPU due to excess numbers of RCU callbacks.
+ * We wake up the per-rcu_node kthread, which in turn will wake up
+ * the booster kthread.
+ */
+static void rcu_cpu_kthread_timer(unsigned long arg)
+{
+        struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
+        struct rcu_node *rnp = rdp->mynode;
+
+        atomic_or(rdp->grpmask, &rnp->wakemask);
+        invoke_rcu_node_kthread(rnp);
+}
+
+/*
+ * Drop to non-real-time priority and yield, but only after posting a
+ * timer that will cause us to regain our real-time priority if we
+ * remain preempted. Either way, we restore our real-time priority
+ * before returning.
+ */
+static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
+{
+        struct sched_param sp;
+        struct timer_list yield_timer;
+
+        setup_timer_on_stack(&yield_timer, f, arg);
+        mod_timer(&yield_timer, jiffies + 2);
+        sp.sched_priority = 0;
+        sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
+        set_user_nice(current, 19);
+        schedule();
+        sp.sched_priority = RCU_KTHREAD_PRIO;
+        sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
+        del_timer(&yield_timer);
+}
+
+/*
+ * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
+ * This can happen while the corresponding CPU is either coming online
+ * or going offline. We cannot wait until the CPU is fully online
+ * before starting the kthread, because the various notifier functions
+ * can wait for RCU grace periods. So we park rcu_cpu_kthread() until
+ * the corresponding CPU is online.
+ *
+ * Return 1 if the kthread needs to stop, 0 otherwise.
+ *
+ * Caller must disable bh. This function can momentarily enable it.
+ */
+static int rcu_cpu_kthread_should_stop(int cpu)
+{
+        while (cpu_is_offline(cpu) ||
+               !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
+               smp_processor_id() != cpu) {
+                if (kthread_should_stop())
+                        return 1;
+                per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
+                per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
+                local_bh_enable();
+                schedule_timeout_uninterruptible(1);
+                if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
+                        set_cpus_allowed_ptr(current, cpumask_of(cpu));
+                local_bh_disable();
+        }
+        per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
+        return 0;
+}
+
+/*
+ * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
+ * earlier RCU softirq.
+ */
+static int rcu_cpu_kthread(void *arg)
+{
+        int cpu = (int)(long)arg;
+        unsigned long flags;
+        int spincnt = 0;
+        unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
+        char work;
+        char *workp = &per_cpu(rcu_cpu_has_work, cpu);
+
+        for (;;) {
+                *statusp = RCU_KTHREAD_WAITING;
+                rcu_wait(*workp != 0 || kthread_should_stop());
+                local_bh_disable();
+                if (rcu_cpu_kthread_should_stop(cpu)) {
+                        local_bh_enable();
+                        break;
+                }
+                *statusp = RCU_KTHREAD_RUNNING;
+                per_cpu(rcu_cpu_kthread_loops, cpu)++;
+                local_irq_save(flags);
+                work = *workp;
+                *workp = 0;
+                local_irq_restore(flags);
+                if (work)
+                        rcu_kthread_do_work();
+                local_bh_enable();
+                if (*workp != 0)
+                        spincnt++;
+                else
+                        spincnt = 0;
+                if (spincnt > 10) {
+                        *statusp = RCU_KTHREAD_YIELDING;
+                        rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
+                        spincnt = 0;
+                }
+        }
+        *statusp = RCU_KTHREAD_STOPPED;
+        return 0;
+}
+
+/*
+ * Spawn a per-CPU kthread, setting up affinity and priority.
+ * Because the CPU hotplug lock is held, no other CPU will be attempting
+ * to manipulate rcu_cpu_kthread_task. There might be another CPU
+ * attempting to access it during boot, but the locking in kthread_bind()
+ * will enforce sufficient ordering.
+ *
+ * Please note that we cannot simply refuse to wake up the per-CPU
+ * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
+ * which can result in softlockup complaints if the task ends up being
+ * idle for more than a couple of minutes.
+ *
+ * However, please note also that we cannot bind the per-CPU kthread to its
+ * CPU until that CPU is fully online. We also cannot wait until the
+ * CPU is fully online before we create its per-CPU kthread, as this would
+ * deadlock the system when CPU notifiers tried waiting for grace
+ * periods. So we bind the per-CPU kthread to its CPU only if the CPU
+ * is online. If its CPU is not yet fully online, then the code in
+ * rcu_cpu_kthread() will wait until it is fully online, and then do
+ * the binding.
+ */
+static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
+{
+        struct sched_param sp;
+        struct task_struct *t;
+
+        if (!rcu_kthreads_spawnable ||
+            per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
+                return 0;
+        t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
+        if (IS_ERR(t))
+                return PTR_ERR(t);
+        if (cpu_online(cpu))
+                kthread_bind(t, cpu);
+        per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
+        WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
+        sp.sched_priority = RCU_KTHREAD_PRIO;
+        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+        per_cpu(rcu_cpu_kthread_task, cpu) = t;
+        wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
+        return 0;
+}
+
+/*
+ * Per-rcu_node kthread, which is in charge of waking up the per-CPU
+ * kthreads when needed. We ignore requests to wake up kthreads
+ * for offline CPUs, which is OK because force_quiescent_state()
+ * takes care of this case.
+ */
+static int rcu_node_kthread(void *arg)
+{
+        int cpu;
+        unsigned long flags;
+        unsigned long mask;
+        struct rcu_node *rnp = (struct rcu_node *)arg;
+        struct sched_param sp;
+        struct task_struct *t;
+
+        for (;;) {
+                rnp->node_kthread_status = RCU_KTHREAD_WAITING;
+                rcu_wait(atomic_read(&rnp->wakemask) != 0);
+                rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
+                raw_spin_lock_irqsave(&rnp->lock, flags);
+                mask = atomic_xchg(&rnp->wakemask, 0);
+                rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
+                for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
+                        if ((mask & 0x1) == 0)
+                                continue;
+                        preempt_disable();
+                        t = per_cpu(rcu_cpu_kthread_task, cpu);
+                        if (!cpu_online(cpu) || t == NULL) {
+                                preempt_enable();
+                                continue;
+                        }
+                        per_cpu(rcu_cpu_has_work, cpu) = 1;
+                        sp.sched_priority = RCU_KTHREAD_PRIO;
+                        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+                        preempt_enable();
+                }
+        }
+        /* NOTREACHED */
+        rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
+        return 0;
+}
+
+/*
+ * Set the per-rcu_node kthread's affinity to cover all CPUs that are
+ * served by the rcu_node in question. The CPU hotplug lock is still
+ * held, so the value of rnp->qsmaskinit will be stable.
+ *
+ * We don't include outgoingcpu in the affinity set, use -1 if there is
+ * no outgoing CPU. If there are no CPUs left in the affinity set,
+ * this function allows the kthread to execute on any CPU.
+ */
+static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+{
+        cpumask_var_t cm;
+        int cpu;
+        unsigned long mask = rnp->qsmaskinit;
+
+        if (rnp->node_kthread_task == NULL)
+                return;
+        if (!alloc_cpumask_var(&cm, GFP_KERNEL))
+                return;
+        cpumask_clear(cm);
+        for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
+                if ((mask & 0x1) && cpu != outgoingcpu)
+                        cpumask_set_cpu(cpu, cm);
+        if (cpumask_weight(cm) == 0) {
+                cpumask_setall(cm);
+                for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
+                        cpumask_clear_cpu(cpu, cm);
+                WARN_ON_ONCE(cpumask_weight(cm) == 0);
+        }
+        set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
+        rcu_boost_kthread_setaffinity(rnp, cm);
+        free_cpumask_var(cm);
+}
+
+/*
+ * Spawn a per-rcu_node kthread, setting priority and affinity.
+ * Called during boot before online/offline can happen, or, if
+ * during runtime, with the main CPU-hotplug locks held. So only
+ * one of these can be executing at a time.
+ */
+static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
+                                                struct rcu_node *rnp)
+{
+        unsigned long flags;
+        int rnp_index = rnp - &rsp->node[0];
+        struct sched_param sp;
+        struct task_struct *t;
+
+        if (!rcu_kthreads_spawnable ||
+            rnp->qsmaskinit == 0)
+                return 0;
+        if (rnp->node_kthread_task == NULL) {
+                t = kthread_create(rcu_node_kthread, (void *)rnp,
+                                   "rcun%d", rnp_index);
+                if (IS_ERR(t))
+                        return PTR_ERR(t);
+                raw_spin_lock_irqsave(&rnp->lock, flags);
+                rnp->node_kthread_task = t;
+                raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                sp.sched_priority = 99;
+                sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+                wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
+        }
+        return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
+}
+
+/*
+ * Spawn all kthreads -- called as soon as the scheduler is running.
+ */
+static int __init rcu_spawn_kthreads(void)
+{
+        int cpu;
+        struct rcu_node *rnp;
+
+        rcu_kthreads_spawnable = 1;
+        for_each_possible_cpu(cpu) {
+                per_cpu(rcu_cpu_has_work, cpu) = 0;
+                if (cpu_online(cpu))
+                        (void)rcu_spawn_one_cpu_kthread(cpu);
+        }
+        rnp = rcu_get_root(rcu_state);
+        (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+        if (NUM_RCU_NODES > 1) {
+                rcu_for_each_leaf_node(rcu_state, rnp)
+                        (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+        }
+        return 0;
+}
+early_initcall(rcu_spawn_kthreads);
+
+static void __cpuinit rcu_prepare_kthreads(int cpu)
+{
+        struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
+        struct rcu_node *rnp = rdp->mynode;
+
+        /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
+        if (rcu_kthreads_spawnable) {
+                (void)rcu_spawn_one_cpu_kthread(cpu);
+                if (rnp->node_kthread_task == NULL)
+                        (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+        }
+}
+
 #else /* #ifdef CONFIG_RCU_BOOST */
 
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
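
Both kthread main loops above block in rcu_wait() rather than on an explicit waitqueue. That macro lives elsewhere in rcutree.h and is untouched by this patch; for reference, in kernels of this vintage it is essentially a set-state/test/schedule loop:

#define rcu_wait(cond)                                                  \
do {                                                                    \
        for (;;) {                                                      \
                set_current_state(TASK_INTERRUPTIBLE);                  \
                if (cond)                                               \
                        break;                                          \
                schedule();                                             \
        }                                                               \
        __set_current_state(TASK_RUNNING);                              \
} while (0)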
@@ -1346,6 +1710,26 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
 {
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+
+static void rcu_stop_cpu_kthread(int cpu)
+{
+}
+
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+
+static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+{
+}
+
+static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
+{
+}
+
+static void __cpuinit rcu_prepare_kthreads(int cpu)
+{
+}
+
 #endif /* #else #ifdef CONFIG_RCU_BOOST */
 
 #ifndef CONFIG_SMP
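
Net result: rcutree.c now reaches the kthread code only through four entry points, each of which rcutree_plugin.h defines twice:

  entry point                       CONFIG_RCU_BOOST=y              CONFIG_RCU_BOOST=n
  rcu_stop_cpu_kthread()            stops the CPU's kthread (*)     empty stub (*)
  rcu_node_kthread_setaffinity()    rebinds the node kthread        empty stub
  rcu_cpu_kthread_setrt()           toggles SCHED_FIFO/NORMAL       empty stub
  rcu_prepare_kthreads()            spawns CPU and node kthreads    empty stub

  (*) additionally guarded by CONFIG_HOTPLUG_CPU on both sides.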
