-rw-r--r-- kernel/rcutree.c        | 23 ++++++-----------------
-rw-r--r-- kernel/rcutree.h        | 17 ++++++++++-------
-rw-r--r-- kernel/rcutree_plugin.h | 16 +----------------
3 files changed, 17 insertions(+), 39 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 5d96d68d20f8..05e254e930e3 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -95,7 +95,6 @@ static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
 DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
-static DEFINE_PER_CPU(wait_queue_head_t, rcu_cpu_wq);
 DEFINE_PER_CPU(char, rcu_cpu_has_work);
 static char rcu_kthreads_spawnable;
 
@@ -1476,7 +1475,7 @@ static void invoke_rcu_cpu_kthread(void)
                 local_irq_restore(flags);
                 return;
         }
-        wake_up(&__get_cpu_var(rcu_cpu_wq));
+        wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
         local_irq_restore(flags);
 }
 
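Note: switching from wake_up() on a per-CPU waitqueue to a direct wake_up_process() cannot lose a wakeup, because the waker publishes rcu_cpu_has_work before waking and the sleeper re-checks its condition after setting TASK_INTERRUPTIBLE (see the rcu_wait() macro added in rcutree.h below). A minimal sketch of that ordering, with hypothetical names, not the literal patched code:

/* Hypothetical sketch of the waker side; names are illustrative. */
static void example_invoke(struct task_struct *t, char *has_work)
{
        *has_work = 1;          /* 1. publish the work flag...        */
        wake_up_process(t);     /* 2. ...then wake the parked kthread */
        /*
         * If t sits between set_current_state(TASK_INTERRUPTIBLE) and
         * schedule() in rcu_wait(), wake_up_process() flips it back to
         * TASK_RUNNING, so schedule() returns promptly and the re-check
         * of the condition sees the flag.  If t is already running,
         * the call is a harmless no-op.
         */
}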
@@ -1596,14 +1595,12 @@ static int rcu_cpu_kthread(void *arg)
         unsigned long flags;
         int spincnt = 0;
         unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
-        wait_queue_head_t *wqp = &per_cpu(rcu_cpu_wq, cpu);
         char work;
         char *workp = &per_cpu(rcu_cpu_has_work, cpu);
 
         for (;;) {
                 *statusp = RCU_KTHREAD_WAITING;
-                wait_event_interruptible(*wqp,
-                                         *workp != 0 || kthread_should_stop());
+                rcu_wait(*workp != 0 || kthread_should_stop());
                 local_bh_disable();
                 if (rcu_cpu_kthread_should_stop(cpu)) {
                         local_bh_enable();
@@ -1654,7 +1651,6 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
         per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
         WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
         per_cpu(rcu_cpu_kthread_task, cpu) = t;
-        wake_up_process(t);
         sp.sched_priority = RCU_KTHREAD_PRIO;
         sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
         return 0;
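Note: the wake_up_process(t) deleted here was only needed so the new kthread could run far enough to park itself on its waitqueue. With rcu_wait(), a task freshly returned by kthread_create() can simply stay asleep until the first real work item wakes it via invoke_rcu_cpu_kthread(). An abbreviated, hypothetical sketch of the resulting spawn path (helper name and thread-name format are illustrative):

/* Hypothetical, abbreviated spawn path; not the literal patched function. */
static int example_spawn_cpu_kthread(int cpu)
{
        struct sched_param sp;
        struct task_struct *t;

        t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
        if (IS_ERR(t))
                return PTR_ERR(t);
        per_cpu(rcu_cpu_kthread_task, cpu) = t;
        /* No wake_up_process(t): the kthread sleeps until work arrives. */
        sp.sched_priority = RCU_KTHREAD_PRIO;
        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
        return 0;
}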
@@ -1677,8 +1673,7 @@ static int rcu_node_kthread(void *arg)
 
         for (;;) {
                 rnp->node_kthread_status = RCU_KTHREAD_WAITING;
-                wait_event_interruptible(rnp->node_wq,
-                                         atomic_read(&rnp->wakemask) != 0);
+                rcu_wait(atomic_read(&rnp->wakemask) != 0);
                 rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
                 raw_spin_lock_irqsave(&rnp->lock, flags);
                 mask = atomic_xchg(&rnp->wakemask, 0);
@@ -1762,7 +1757,6 @@ static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
                 raw_spin_lock_irqsave(&rnp->lock, flags);
                 rnp->node_kthread_task = t;
                 raw_spin_unlock_irqrestore(&rnp->lock, flags);
-                wake_up_process(t);
                 sp.sched_priority = 99;
                 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
         }
@@ -1779,21 +1773,16 @@ static int __init rcu_spawn_kthreads(void)
 
         rcu_kthreads_spawnable = 1;
         for_each_possible_cpu(cpu) {
-                init_waitqueue_head(&per_cpu(rcu_cpu_wq, cpu));
                 per_cpu(rcu_cpu_has_work, cpu) = 0;
                 if (cpu_online(cpu))
                         (void)rcu_spawn_one_cpu_kthread(cpu);
         }
         rnp = rcu_get_root(rcu_state);
-        init_waitqueue_head(&rnp->node_wq);
-        rcu_init_boost_waitqueue(rnp);
         (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
-        if (NUM_RCU_NODES > 1)
-                rcu_for_each_leaf_node(rcu_state, rnp) {
-                        init_waitqueue_head(&rnp->node_wq);
-                        rcu_init_boost_waitqueue(rnp);
+        if (NUM_RCU_NODES > 1) {
+                rcu_for_each_leaf_node(rcu_state, rnp)
                         (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
-                }
+        }
         return 0;
 }
 early_initcall(rcu_spawn_kthreads);
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 561dcb9a8d2c..7b9a08b4aaea 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -159,9 +159,6 @@ struct rcu_node {
         struct task_struct *boost_kthread_task;
                                 /* kthread that takes care of priority */
                                 /*  boosting for this rcu_node structure. */
-        wait_queue_head_t boost_wq;
-                                /* Wait queue on which to park the boost */
-                                /*  kthread. */
         unsigned int boost_kthread_status;
                                 /* State of boost_kthread_task for tracing. */
         unsigned long n_tasks_boosted;
@@ -188,9 +185,6 @@ struct rcu_node {
                                 /* kthread that takes care of this rcu_node */
                                 /*  structure, for example, awakening the */
                                 /*  per-CPU kthreads as needed. */
-        wait_queue_head_t node_wq;
-                                /* Wait queue on which to park the per-node */
-                                /*  kthread. */
         unsigned int node_kthread_status;
                                 /* State of node_kthread_task for tracing. */
 } ____cacheline_internodealigned_in_smp;
@@ -336,6 +330,16 @@ struct rcu_data {
                                 /*  scheduling clock irq */
                                 /*  before ratting on them. */
 
+#define rcu_wait(cond)                                                  \
+do {                                                                    \
+        for (;;) {                                                      \
+                set_current_state(TASK_INTERRUPTIBLE);                  \
+                if (cond)                                               \
+                        break;                                          \
+                schedule();                                             \
+        }                                                               \
+        __set_current_state(TASK_RUNNING);                              \
+} while (0)
 
 /*
  * RCU global state, including node hierarchy.  This hierarchy is
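Note: rcu_wait() open-codes the sleep half of wait_event_interruptible() without any wait_queue_head_t. That works here because each of these kthreads has exactly one well-known task_struct for the waker to target with wake_up_process(), and because the condition is re-checked before every schedule(), so spurious wakeups are tolerated. A minimal hypothetical sleeper built on the macro, not part of this patch:

/* Hypothetical sleeper using rcu_wait(); names are illustrative. */
static int example_kthread(void *arg)
{
        char *workp = arg;      /* flag the waker sets before waking us */

        for (;;) {
                /* Park until there is work or we are asked to stop. */
                rcu_wait(*workp != 0 || kthread_should_stop());
                if (kthread_should_stop())
                        break;
                *workp = 0;
                /* ... process the work ... */
        }
        return 0;
}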
@@ -445,7 +449,6 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
 static void rcu_preempt_send_cbs_to_online(void);
 static void __init __rcu_init_preempt(void);
 static void rcu_needs_cpu_flush(void);
-static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
                                           cpumask_var_t cm);
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index ed339702481d..049f2787a984 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1196,8 +1196,7 @@ static int rcu_boost_kthread(void *arg)
 
         for (;;) {
                 rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
-                wait_event_interruptible(rnp->boost_wq, rnp->boost_tasks ||
-                                                        rnp->exp_tasks);
+                rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
                 rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
                 more2boost = rcu_boost(rnp);
                 if (more2boost)
@@ -1275,14 +1274,6 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
 }
 
 /*
- * Initialize the RCU-boost waitqueue.
- */
-static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp)
-{
-        init_waitqueue_head(&rnp->boost_wq);
-}
-
-/*
  * Create an RCU-boost kthread for the specified node if one does not
  * already exist.  We only create this kthread for preemptible RCU.
  * Returns zero if all is well, a negated errno otherwise.
@@ -1306,7 +1297,6 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
         raw_spin_lock_irqsave(&rnp->lock, flags);
         rnp->boost_kthread_task = t;
         raw_spin_unlock_irqrestore(&rnp->lock, flags);
-        wake_up_process(t);
         sp.sched_priority = RCU_KTHREAD_PRIO;
         sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
         return 0;
@@ -1328,10 +1318,6 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
 {
 }
 
-static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp)
-{
-}
-
 static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
                                                  struct rcu_node *rnp,
                                                  int rnp_index)