Diffstat (limited to 'kernel/rcutree.c')
 kernel/rcutree.c | 23 ++++++-----------------
 1 file changed, 6 insertions(+), 17 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 5d96d68d20f8..05e254e930e3 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -95,7 +95,6 @@ static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
 DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
-static DEFINE_PER_CPU(wait_queue_head_t, rcu_cpu_wq);
 DEFINE_PER_CPU(char, rcu_cpu_has_work);
 static char rcu_kthreads_spawnable;
 
@@ -1476,7 +1475,7 @@ static void invoke_rcu_cpu_kthread(void)
 		local_irq_restore(flags);
 		return;
 	}
-	wake_up(&__get_cpu_var(rcu_cpu_wq));
+	wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
 	local_irq_restore(flags);
 }
 
@@ -1596,14 +1595,12 @@ static int rcu_cpu_kthread(void *arg)
 	unsigned long flags;
 	int spincnt = 0;
 	unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
-	wait_queue_head_t *wqp = &per_cpu(rcu_cpu_wq, cpu);
 	char work;
 	char *workp = &per_cpu(rcu_cpu_has_work, cpu);
 
 	for (;;) {
 		*statusp = RCU_KTHREAD_WAITING;
-		wait_event_interruptible(*wqp,
-					 *workp != 0 || kthread_should_stop());
+		rcu_wait(*workp != 0 || kthread_should_stop());
 		local_bh_disable();
 		if (rcu_cpu_kthread_should_stop(cpu)) {
 			local_bh_enable();
@@ -1654,7 +1651,6 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
 	per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
 	WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
 	per_cpu(rcu_cpu_kthread_task, cpu) = t;
-	wake_up_process(t);
 	sp.sched_priority = RCU_KTHREAD_PRIO;
 	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 	return 0;
@@ -1677,8 +1673,7 @@ static int rcu_node_kthread(void *arg)
 
 	for (;;) {
 		rnp->node_kthread_status = RCU_KTHREAD_WAITING;
-		wait_event_interruptible(rnp->node_wq,
-					 atomic_read(&rnp->wakemask) != 0);
+		rcu_wait(atomic_read(&rnp->wakemask) != 0);
 		rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
 		raw_spin_lock_irqsave(&rnp->lock, flags);
 		mask = atomic_xchg(&rnp->wakemask, 0);
@@ -1762,7 +1757,6 @@ static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
 	raw_spin_lock_irqsave(&rnp->lock, flags);
 	rnp->node_kthread_task = t;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
-	wake_up_process(t);
 	sp.sched_priority = 99;
 	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
 }
@@ -1779,21 +1773,16 @@ static int __init rcu_spawn_kthreads(void)
 
 	rcu_kthreads_spawnable = 1;
 	for_each_possible_cpu(cpu) {
-		init_waitqueue_head(&per_cpu(rcu_cpu_wq, cpu));
 		per_cpu(rcu_cpu_has_work, cpu) = 0;
 		if (cpu_online(cpu))
 			(void)rcu_spawn_one_cpu_kthread(cpu);
 	}
 	rnp = rcu_get_root(rcu_state);
-	init_waitqueue_head(&rnp->node_wq);
-	rcu_init_boost_waitqueue(rnp);
 	(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
-	if (NUM_RCU_NODES > 1)
-		rcu_for_each_leaf_node(rcu_state, rnp) {
-			init_waitqueue_head(&rnp->node_wq);
-			rcu_init_boost_waitqueue(rnp);
+	if (NUM_RCU_NODES > 1) {
+		rcu_for_each_leaf_node(rcu_state, rnp)
 			(void)rcu_spawn_one_node_kthread(rcu_state, rnp);
-		}
+	}
 	return 0;
 }
 early_initcall(rcu_spawn_kthreads);
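
The rcu_wait() helper that replaces the wait_event_interruptible() calls is not visible here, since this view is limited to kernel/rcutree.c; it is defined elsewhere in the same commit. A minimal sketch of the kthread-parking idiom it is assumed to implement, paired with the wake_up_process() call now used by invoke_rcu_cpu_kthread():

/*
 * Minimal sketch of the assumed rcu_wait() idiom; the real definition
 * lives outside this diff.  Setting the task state *before* testing the
 * condition closes the race with wake_up_process(): a wakeup arriving
 * between the test and schedule() moves the task back to TASK_RUNNING,
 * so schedule() returns immediately instead of sleeping indefinitely.
 */
#define rcu_wait(cond)							\
do {									\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
		schedule();						\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)

This pairing would also account for dropping the spawn-time wake_up_process(t) calls: assuming the kthreads are created with kthread_create(), which leaves the new task stopped, the first wake_up_process() issued on behalf of pending work (or of a wakemask update) is enough to start the thread, so the kthreads need not run at all until there is something for them to do.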