-rw-r--r--  kernel/rcutree.c        | 65
-rw-r--r--  kernel/rcutree_plugin.h | 11
2 files changed, 22 insertions, 54 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 89419ff92e99..0a8ec5b2e208 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1635,6 +1635,20 @@ static int rcu_cpu_kthread(void *arg)
  * to manipulate rcu_cpu_kthread_task. There might be another CPU
  * attempting to access it during boot, but the locking in kthread_bind()
  * will enforce sufficient ordering.
+ *
+ * Please note that we cannot simply refuse to wake up the per-CPU
+ * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
+ * which can result in softlockup complaints if the task ends up being
+ * idle for more than a couple of minutes.
+ *
+ * However, please note also that we cannot bind the per-CPU kthread to its
+ * CPU until that CPU is fully online. We also cannot wait until the
+ * CPU is fully online before we create its per-CPU kthread, as this would
+ * deadlock the system when CPU notifiers tried waiting for grace
+ * periods. So we bind the per-CPU kthread to its CPU only if the CPU
+ * is online. If its CPU is not yet fully online, then the code in
+ * rcu_cpu_kthread() will wait until it is fully online, and then do
+ * the binding.
  */
 static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
 {
@@ -1647,12 +1661,14 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
         t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
         if (IS_ERR(t))
                 return PTR_ERR(t);
-        kthread_bind(t, cpu);
+        if (cpu_online(cpu))
+                kthread_bind(t, cpu);
         per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
         WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
-        per_cpu(rcu_cpu_kthread_task, cpu) = t;
         sp.sched_priority = RCU_KTHREAD_PRIO;
         sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+        per_cpu(rcu_cpu_kthread_task, cpu) = t;
+        wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
         return 0;
 }
 
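With this hunk applied, rcu_spawn_one_cpu_kthread() binds the new kthread to its CPU only if that CPU is already online, publishes the task pointer after the priority has been set, and wakes the thread immediately so it can park itself in TASK_INTERRUPTIBLE rather than sit in TASK_UNINTERRUPTIBLE long enough to trigger softlockup complaints. For reference, a sketch of the whole function after the change; the local declarations of sp and t, and anything else above the hunk's context, are not visible here and are assumed:

static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
{
        struct sched_param sp;  /* assumed: declared above the visible hunk */
        struct task_struct *t;  /* assumed: declared above the visible hunk */

        t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
        if (IS_ERR(t))
                return PTR_ERR(t);
        if (cpu_online(cpu))            /* bind only if the CPU is already online */
                kthread_bind(t, cpu);
        per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
        WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
        sp.sched_priority = RCU_KTHREAD_PRIO;
        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
        per_cpu(rcu_cpu_kthread_task, cpu) = t; /* publish after priority is set */
        wake_up_process(t);             /* Get to TASK_INTERRUPTIBLE quickly. */
        return 0;
}

Per the new comment block, rcu_cpu_kthread() itself is expected to defer the actual binding until its CPU is fully online when the bind could not be done at creation time.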
@@ -1759,12 +1775,11 @@ static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
                 raw_spin_unlock_irqrestore(&rnp->lock, flags);
                 sp.sched_priority = 99;
                 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+                wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
         }
         return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
 }
 
-static void rcu_wake_one_boost_kthread(struct rcu_node *rnp);
-
 /*
  * Spawn all kthreads -- called as soon as the scheduler is running.
  */
@@ -1772,30 +1787,18 @@ static int __init rcu_spawn_kthreads(void)
 {
         int cpu;
         struct rcu_node *rnp;
-        struct task_struct *t;
 
         rcu_kthreads_spawnable = 1;
         for_each_possible_cpu(cpu) {
                 per_cpu(rcu_cpu_has_work, cpu) = 0;
-                if (cpu_online(cpu)) {
+                if (cpu_online(cpu))
                         (void)rcu_spawn_one_cpu_kthread(cpu);
-                        t = per_cpu(rcu_cpu_kthread_task, cpu);
-                        if (t)
-                                wake_up_process(t);
-                }
         }
         rnp = rcu_get_root(rcu_state);
         (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
-        if (rnp->node_kthread_task)
-                wake_up_process(rnp->node_kthread_task);
         if (NUM_RCU_NODES > 1) {
-                rcu_for_each_leaf_node(rcu_state, rnp) {
+                rcu_for_each_leaf_node(rcu_state, rnp)
                         (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
-                        t = rnp->node_kthread_task;
-                        if (t)
-                                wake_up_process(t);
-                        rcu_wake_one_boost_kthread(rnp);
-                }
         }
         return 0;
 }
@@ -2221,31 +2224,6 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
 }
 
 /*
- * kthread_create() creates threads in TASK_UNINTERRUPTIBLE state,
- * but the RCU threads are woken on demand, and if demand is low this
- * could be a while triggering the hung task watchdog.
- *
- * In order to avoid this, poke all tasks once the CPU is fully
- * up and running.
- */
-static void __cpuinit rcu_online_kthreads(int cpu)
-{
-        struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
-        struct rcu_node *rnp = rdp->mynode;
-        struct task_struct *t;
-
-        t = per_cpu(rcu_cpu_kthread_task, cpu);
-        if (t)
-                wake_up_process(t);
-
-        t = rnp->node_kthread_task;
-        if (t)
-                wake_up_process(t);
-
-        rcu_wake_one_boost_kthread(rnp);
-}
-
-/*
  * Handle CPU online/offline notification events.
  */
 static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
@@ -2262,7 +2240,6 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
                 rcu_prepare_kthreads(cpu);
                 break;
         case CPU_ONLINE:
-                rcu_online_kthreads(cpu);
         case CPU_DOWN_FAILED:
                 rcu_node_kthread_setaffinity(rnp, -1);
                 rcu_cpu_kthread_setrt(cpu, 1);
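The deleted rcu_online_kthreads() comment states the underlying problem: kthread_create() leaves the new task in TASK_UNINTERRUPTIBLE, and if nothing wakes it for a couple of minutes the hung-task watchdog complains. Rather than poking every RCU kthread from the CPU_ONLINE notifier, the patch wakes each kthread once at creation time, after which it sleeps interruptibly until it has work. Below is a minimal, self-contained sketch of that create-then-wake pattern; it is hypothetical demo code, not taken from this patch, and names such as demo_kthread and demo_wq are invented for illustration:

#include <linux/err.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int demo_has_work;               /* unlocked flag; good enough for a sketch */
static struct task_struct *demo_task;

static int demo_kthread(void *unused)
{
        while (!kthread_should_stop()) {
                /* Sleep in TASK_INTERRUPTIBLE so the hung-task watchdog stays quiet. */
                wait_event_interruptible(demo_wq,
                                         demo_has_work || kthread_should_stop());
                demo_has_work = 0;
                /* ... do the actual work here ... */
        }
        return 0;
}

static int __init demo_init(void)
{
        demo_task = kthread_create(demo_kthread, NULL, "demo_kthread");
        if (IS_ERR(demo_task))
                return PTR_ERR(demo_task);
        /* Without this wake-up the new task lingers in TASK_UNINTERRUPTIBLE. */
        wake_up_process(demo_task);
        return 0;
}

static void __exit demo_exit(void)
{
        kthread_stop(demo_task);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");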
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index c8bff3099a89..ea2e2fb79e81 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1299,15 +1299,10 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
         raw_spin_unlock_irqrestore(&rnp->lock, flags);
         sp.sched_priority = RCU_KTHREAD_PRIO;
         sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+        wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
         return 0;
 }
 
-static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp)
-{
-        if (rnp->boost_kthread_task)
-                wake_up_process(rnp->boost_kthread_task);
-}
-
 #else /* #ifdef CONFIG_RCU_BOOST */
 
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
@@ -1331,10 +1326,6 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
         return 0;
 }
 
-static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp)
-{
-}
-
 #endif /* #else #ifdef CONFIG_RCU_BOOST */
 
 #ifndef CONFIG_SMP