Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--	kernel/rcutree.c	398
1 file changed, 19 insertions(+), 379 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 89419ff92e99..7e59ffb3d0ba 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -87,6 +87,8 @@ static struct rcu_state *rcu_state;
 int rcu_scheduler_active __read_mostly;
 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 
+#ifdef CONFIG_RCU_BOOST
+
 /*
  * Control variables for per-CPU and per-rcu_node kthreads.  These
  * handle all flavors of RCU.
@@ -98,8 +100,11 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
 DEFINE_PER_CPU(char, rcu_cpu_has_work);
 static char rcu_kthreads_spawnable;
 
+#endif /* #ifdef CONFIG_RCU_BOOST */
+
 static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
-static void invoke_rcu_cpu_kthread(void);
+static void invoke_rcu_core(void);
+static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
 
 #define RCU_KTHREAD_PRIO 1	/* RT priority for per-CPU kthreads. */
 
@@ -1088,14 +1093,8 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 	int need_report = 0;
 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
 	struct rcu_node *rnp;
-	struct task_struct *t;
 
-	/* Stop the CPU's kthread. */
-	t = per_cpu(rcu_cpu_kthread_task, cpu);
-	if (t != NULL) {
-		per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
-		kthread_stop(t);
-	}
+	rcu_stop_cpu_kthread(cpu);
 
 	/* Exclude any attempts to start a new grace period. */
 	raw_spin_lock_irqsave(&rsp->onofflock, flags);
@@ -1231,7 +1230,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 
 	/* Re-raise the RCU softirq if there are callbacks remaining. */
 	if (cpu_has_callbacks_ready_to_invoke(rdp))
-		invoke_rcu_cpu_kthread();
+		invoke_rcu_core();
 }
 
 /*
@@ -1277,7 +1276,7 @@ void rcu_check_callbacks(int cpu, int user)
 	}
 	rcu_preempt_check_callbacks(cpu);
 	if (rcu_pending(cpu))
-		invoke_rcu_cpu_kthread();
+		invoke_rcu_core();
 }
 
 #ifdef CONFIG_SMP
@@ -1442,13 +1441,14 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 	}
 
 	/* If there are callbacks ready, invoke them. */
-	rcu_do_batch(rsp, rdp);
+	if (cpu_has_callbacks_ready_to_invoke(rdp))
+		invoke_rcu_callbacks(rsp, rdp);
 }
 
 /*
  * Do softirq processing for the current CPU.
  */
-static void rcu_process_callbacks(void)
+static void rcu_process_callbacks(struct softirq_action *unused)
 {
 	__rcu_process_callbacks(&rcu_sched_state,
 				&__get_cpu_var(rcu_sched_data));
@@ -1465,342 +1465,20 @@ static void rcu_process_callbacks(void)
  * the current CPU with interrupts disabled, the rcu_cpu_kthread_task
  * cannot disappear out from under us.
  */
-static void invoke_rcu_cpu_kthread(void)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__this_cpu_write(rcu_cpu_has_work, 1);
-	if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
-		local_irq_restore(flags);
-		return;
-	}
-	wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
-	local_irq_restore(flags);
-}
-
-/*
- * Wake up the specified per-rcu_node-structure kthread.
- * Because the per-rcu_node kthreads are immortal, we don't need
- * to do anything to keep them alive.
- */
-static void invoke_rcu_node_kthread(struct rcu_node *rnp)
-{
-	struct task_struct *t;
-
-	t = rnp->node_kthread_task;
-	if (t != NULL)
-		wake_up_process(t);
-}
-
-/*
- * Set the specified CPU's kthread to run RT or not, as specified by
- * the to_rt argument.  The CPU-hotplug locks are held, so the task
- * is not going away.
- */
-static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
-{
-	int policy;
-	struct sched_param sp;
-	struct task_struct *t;
-
-	t = per_cpu(rcu_cpu_kthread_task, cpu);
-	if (t == NULL)
-		return;
-	if (to_rt) {
-		policy = SCHED_FIFO;
-		sp.sched_priority = RCU_KTHREAD_PRIO;
-	} else {
-		policy = SCHED_NORMAL;
-		sp.sched_priority = 0;
-	}
-	sched_setscheduler_nocheck(t, policy, &sp);
-}
-
-/*
- * Timer handler to initiate the waking up of per-CPU kthreads that
- * have yielded the CPU due to excess numbers of RCU callbacks.
- * We wake up the per-rcu_node kthread, which in turn will wake up
- * the booster kthread.
- */
-static void rcu_cpu_kthread_timer(unsigned long arg)
-{
-	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
-	struct rcu_node *rnp = rdp->mynode;
-
-	atomic_or(rdp->grpmask, &rnp->wakemask);
-	invoke_rcu_node_kthread(rnp);
-}
-
-/*
- * Drop to non-real-time priority and yield, but only after posting a
- * timer that will cause us to regain our real-time priority if we
- * remain preempted.  Either way, we restore our real-time priority
- * before returning.
- */
-static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
-{
-	struct sched_param sp;
-	struct timer_list yield_timer;
-
-	setup_timer_on_stack(&yield_timer, f, arg);
-	mod_timer(&yield_timer, jiffies + 2);
-	sp.sched_priority = 0;
-	sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
-	set_user_nice(current, 19);
-	schedule();
-	sp.sched_priority = RCU_KTHREAD_PRIO;
-	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
-	del_timer(&yield_timer);
-}
-
-/*
- * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
- * This can happen while the corresponding CPU is either coming online
- * or going offline.  We cannot wait until the CPU is fully online
- * before starting the kthread, because the various notifier functions
- * can wait for RCU grace periods.  So we park rcu_cpu_kthread() until
- * the corresponding CPU is online.
- *
- * Return 1 if the kthread needs to stop, 0 otherwise.
- *
- * Caller must disable bh.  This function can momentarily enable it.
- */
-static int rcu_cpu_kthread_should_stop(int cpu)
-{
-	while (cpu_is_offline(cpu) ||
-	       !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
-	       smp_processor_id() != cpu) {
-		if (kthread_should_stop())
-			return 1;
-		per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
-		per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
-		local_bh_enable();
-		schedule_timeout_uninterruptible(1);
-		if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
-			set_cpus_allowed_ptr(current, cpumask_of(cpu));
-		local_bh_disable();
-	}
-	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
-	return 0;
-}
-
-/*
- * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
- * earlier RCU softirq.
- */
-static int rcu_cpu_kthread(void *arg)
-{
-	int cpu = (int)(long)arg;
-	unsigned long flags;
-	int spincnt = 0;
-	unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
-	char work;
-	char *workp = &per_cpu(rcu_cpu_has_work, cpu);
-
-	for (;;) {
-		*statusp = RCU_KTHREAD_WAITING;
-		rcu_wait(*workp != 0 || kthread_should_stop());
-		local_bh_disable();
-		if (rcu_cpu_kthread_should_stop(cpu)) {
-			local_bh_enable();
-			break;
-		}
-		*statusp = RCU_KTHREAD_RUNNING;
-		per_cpu(rcu_cpu_kthread_loops, cpu)++;
-		local_irq_save(flags);
-		work = *workp;
-		*workp = 0;
-		local_irq_restore(flags);
-		if (work)
-			rcu_process_callbacks();
-		local_bh_enable();
-		if (*workp != 0)
-			spincnt++;
-		else
-			spincnt = 0;
-		if (spincnt > 10) {
-			*statusp = RCU_KTHREAD_YIELDING;
-			rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
-			spincnt = 0;
-		}
-	}
-	*statusp = RCU_KTHREAD_STOPPED;
-	return 0;
-}
-
-/*
- * Spawn a per-CPU kthread, setting up affinity and priority.
- * Because the CPU hotplug lock is held, no other CPU will be attempting
- * to manipulate rcu_cpu_kthread_task.  There might be another CPU
- * attempting to access it during boot, but the locking in kthread_bind()
- * will enforce sufficient ordering.
- */
-static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
+static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-	struct sched_param sp;
-	struct task_struct *t;
-
-	if (!rcu_kthreads_spawnable ||
-	    per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
-		return 0;
-	t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
-	if (IS_ERR(t))
-		return PTR_ERR(t);
-	kthread_bind(t, cpu);
-	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
-	WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
-	per_cpu(rcu_cpu_kthread_task, cpu) = t;
-	sp.sched_priority = RCU_KTHREAD_PRIO;
-	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-	return 0;
-}
-
-/*
- * Per-rcu_node kthread, which is in charge of waking up the per-CPU
- * kthreads when needed.  We ignore requests to wake up kthreads
- * for offline CPUs, which is OK because force_quiescent_state()
- * takes care of this case.
- */
-static int rcu_node_kthread(void *arg)
-{
-	int cpu;
-	unsigned long flags;
-	unsigned long mask;
-	struct rcu_node *rnp = (struct rcu_node *)arg;
-	struct sched_param sp;
-	struct task_struct *t;
-
-	for (;;) {
-		rnp->node_kthread_status = RCU_KTHREAD_WAITING;
-		rcu_wait(atomic_read(&rnp->wakemask) != 0);
-		rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
-		raw_spin_lock_irqsave(&rnp->lock, flags);
-		mask = atomic_xchg(&rnp->wakemask, 0);
-		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
-		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
-			if ((mask & 0x1) == 0)
-				continue;
-			preempt_disable();
-			t = per_cpu(rcu_cpu_kthread_task, cpu);
-			if (!cpu_online(cpu) || t == NULL) {
-				preempt_enable();
-				continue;
-			}
-			per_cpu(rcu_cpu_has_work, cpu) = 1;
-			sp.sched_priority = RCU_KTHREAD_PRIO;
-			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-			preempt_enable();
-		}
-	}
-	/* NOTREACHED */
-	rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
-	return 0;
-}
-
-/*
- * Set the per-rcu_node kthread's affinity to cover all CPUs that are
- * served by the rcu_node in question.  The CPU hotplug lock is still
- * held, so the value of rnp->qsmaskinit will be stable.
- *
- * We don't include outgoingcpu in the affinity set, use -1 if there is
- * no outgoing CPU.  If there are no CPUs left in the affinity set,
- * this function allows the kthread to execute on any CPU.
- */
-static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
-{
-	cpumask_var_t cm;
-	int cpu;
-	unsigned long mask = rnp->qsmaskinit;
-
-	if (rnp->node_kthread_task == NULL)
-		return;
-	if (!alloc_cpumask_var(&cm, GFP_KERNEL))
+	if (likely(!rsp->boost)) {
+		rcu_do_batch(rsp, rdp);
 		return;
-	cpumask_clear(cm);
-	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
-		if ((mask & 0x1) && cpu != outgoingcpu)
-			cpumask_set_cpu(cpu, cm);
-	if (cpumask_weight(cm) == 0) {
-		cpumask_setall(cm);
-		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
-			cpumask_clear_cpu(cpu, cm);
-		WARN_ON_ONCE(cpumask_weight(cm) == 0);
 	}
-	set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
-	rcu_boost_kthread_setaffinity(rnp, cm);
-	free_cpumask_var(cm);
+	invoke_rcu_callbacks_kthread();
 }
 
-/*
- * Spawn a per-rcu_node kthread, setting priority and affinity.
- * Called during boot before online/offline can happen, or, if
- * during runtime, with the main CPU-hotplug locks held.  So only
- * one of these can be executing at a time.
- */
-static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
-						struct rcu_node *rnp)
+static void invoke_rcu_core(void)
 {
-	unsigned long flags;
-	int rnp_index = rnp - &rsp->node[0];
-	struct sched_param sp;
-	struct task_struct *t;
-
-	if (!rcu_kthreads_spawnable ||
-	    rnp->qsmaskinit == 0)
-		return 0;
-	if (rnp->node_kthread_task == NULL) {
-		t = kthread_create(rcu_node_kthread, (void *)rnp,
-				   "rcun%d", rnp_index);
-		if (IS_ERR(t))
-			return PTR_ERR(t);
-		raw_spin_lock_irqsave(&rnp->lock, flags);
-		rnp->node_kthread_task = t;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
-		sp.sched_priority = 99;
-		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
-	}
-	return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
+	raise_softirq(RCU_SOFTIRQ);
 }
 
-static void rcu_wake_one_boost_kthread(struct rcu_node *rnp);
-
-/*
- * Spawn all kthreads -- called as soon as the scheduler is running.
- */
-static int __init rcu_spawn_kthreads(void)
-{
-	int cpu;
-	struct rcu_node *rnp;
-	struct task_struct *t;
-
-	rcu_kthreads_spawnable = 1;
-	for_each_possible_cpu(cpu) {
-		per_cpu(rcu_cpu_has_work, cpu) = 0;
-		if (cpu_online(cpu)) {
-			(void)rcu_spawn_one_cpu_kthread(cpu);
-			t = per_cpu(rcu_cpu_kthread_task, cpu);
-			if (t)
-				wake_up_process(t);
-		}
-	}
-	rnp = rcu_get_root(rcu_state);
-	(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
-	if (rnp->node_kthread_task)
-		wake_up_process(rnp->node_kthread_task);
-	if (NUM_RCU_NODES > 1) {
-		rcu_for_each_leaf_node(rcu_state, rnp) {
-			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
-			t = rnp->node_kthread_task;
-			if (t)
-				wake_up_process(t);
-			rcu_wake_one_boost_kthread(rnp);
-		}
-	}
-	return 0;
-}
-early_initcall(rcu_spawn_kthreads);
-
 static void
 __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	   struct rcu_state *rsp)
@@ -2207,44 +1885,6 @@ static void __cpuinit rcu_prepare_cpu(int cpu)
 	rcu_preempt_init_percpu_data(cpu);
 }
 
-static void __cpuinit rcu_prepare_kthreads(int cpu)
-{
-	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
-	struct rcu_node *rnp = rdp->mynode;
-
-	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
-	if (rcu_kthreads_spawnable) {
-		(void)rcu_spawn_one_cpu_kthread(cpu);
-		if (rnp->node_kthread_task == NULL)
-			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
-	}
-}
-
-/*
- * kthread_create() creates threads in TASK_UNINTERRUPTIBLE state,
- * but the RCU threads are woken on demand, and if demand is low this
- * could be a while triggering the hung task watchdog.
- *
- * In order to avoid this, poke all tasks once the CPU is fully
- * up and running.
- */
-static void __cpuinit rcu_online_kthreads(int cpu)
-{
-	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
-	struct rcu_node *rnp = rdp->mynode;
-	struct task_struct *t;
-
-	t = per_cpu(rcu_cpu_kthread_task, cpu);
-	if (t)
-		wake_up_process(t);
-
-	t = rnp->node_kthread_task;
-	if (t)
-		wake_up_process(t);
-
-	rcu_wake_one_boost_kthread(rnp);
-}
-
 /*
  * Handle CPU online/offline notification events.
  */
@@ -2262,7 +1902,6 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
 		rcu_prepare_kthreads(cpu);
 		break;
 	case CPU_ONLINE:
-		rcu_online_kthreads(cpu);
 	case CPU_DOWN_FAILED:
 		rcu_node_kthread_setaffinity(rnp, -1);
 		rcu_cpu_kthread_setrt(cpu, 1);
@@ -2410,6 +2049,7 @@ void __init rcu_init(void)
 	rcu_init_one(&rcu_sched_state, &rcu_sched_data);
 	rcu_init_one(&rcu_bh_state, &rcu_bh_data);
 	__rcu_init_preempt();
+	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 
 	/*
 	 * We don't need protection against CPU-hotplug here because
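
For readers skimming the patch, the control flow it introduces can be modeled in a few lines of ordinary user-space C. This is only an illustrative sketch, not kernel code: the struct fields, raise_softirq(), rcu_do_batch(), and invoke_rcu_callbacks_kthread() below are printf() stand-ins I invented for the demonstration, and the .boost flag simply mirrors the rsp->boost test in the patch. The point is the split: invoke_rcu_core() only raises RCU_SOFTIRQ, while invoke_rcu_callbacks() runs callbacks directly in softirq context unless RCU priority boosting hands them to a kthread.

/*
 * Standalone, simplified model of the dispatch split in this patch --
 * NOT kernel code.  The kernel facilities are mocked so the control
 * flow can be compiled and run in user space.
 */
#include <stdio.h>
#include <stdbool.h>

struct rcu_state {
	const char *name;
	bool boost;		/* mirrors rsp->boost in the patch */
};

/* Mocked stand-ins for the real kernel facilities. */
static void rcu_do_batch(struct rcu_state *rsp)
{
	printf("%s: invoking callbacks directly in softirq context\n",
	       rsp->name);
}

static void invoke_rcu_callbacks_kthread(void)
{
	printf("boost enabled: deferring callbacks to the per-CPU kthread\n");
}

static void raise_softirq_rcu(void)
{
	printf("raise_softirq(RCU_SOFTIRQ): rcu_process_callbacks() will run\n");
}

/* Mirrors the new invoke_rcu_callbacks(): softirq unless boosting is on. */
static void invoke_rcu_callbacks(struct rcu_state *rsp)
{
	if (!rsp->boost) {
		rcu_do_batch(rsp);
		return;
	}
	invoke_rcu_callbacks_kthread();
}

/* Mirrors the new invoke_rcu_core(): just poke the RCU softirq. */
static void invoke_rcu_core(void)
{
	raise_softirq_rcu();
}

int main(void)
{
	struct rcu_state sched = { .name = "rcu_sched", .boost = false };
	struct rcu_state boosted = { .name = "rcu_preempt", .boost = true };

	invoke_rcu_core();		/* e.g. from rcu_check_callbacks() */
	invoke_rcu_callbacks(&sched);	/* softirq path */
	invoke_rcu_callbacks(&boosted);	/* kthread path */
	return 0;
}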