author		Paul E. McKenney <paul.mckenney@linaro.org>	2011-03-29 20:48:28 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2011-05-06 02:16:56 -0400
commit		d71df90eadfc35aa549ff9a850842673febca71f (patch)
tree		dd275a3f4848784bc6625574341c11befc1b3823	/kernel/rcutree.c
parent		0ac3d136b2e3cdf1161178223bc5da14a06241d0 (diff)
rcu: add tracing for RCU's kthread run states.
Add tracing to help debugging situations when RCU's kthreads are not
running but are supposed to be.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--	kernel/rcutree.c	11
1 file changed, 10 insertions(+), 1 deletion(-)
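Note: the diff below stores RCU_KTHREAD_* values into the new status fields; those constants are defined outside this file (in kernel/rcutree.h in this patch series), so they do not appear in the hunks shown here. As a rough sketch only — the exact names are taken from the diff, but the numeric values and comments are assumptions, not part of this commit — such a set of run-state constants could look like:

/*
 * Sketch of kthread run-state values referenced by this diff; the
 * authoritative definitions live in kernel/rcutree.h, not here, and
 * the actual numeric values may differ.
 */
#define RCU_KTHREAD_STOPPED	0	/* kthread has returned. */
#define RCU_KTHREAD_RUNNING	1	/* kthread is processing work. */
#define RCU_KTHREAD_WAITING	2	/* kthread is blocked awaiting work. */
#define RCU_KTHREAD_YIELDING	3	/* kthread is yielding after spinning. */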
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 198e4df7d83e..d8917401cbbc 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -91,8 +91,9 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
  * handle all flavors of RCU.
  */
 static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
+DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
 static DEFINE_PER_CPU(wait_queue_head_t, rcu_cpu_wq);
-static DEFINE_PER_CPU(char, rcu_cpu_has_work);
+DEFINE_PER_CPU(char, rcu_cpu_has_work);
 static char rcu_kthreads_spawnable;
 
 static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
@@ -1563,11 +1564,13 @@ static int rcu_cpu_kthread(void *arg)
 	int cpu = (int)(long)arg;
 	unsigned long flags;
 	int spincnt = 0;
+	unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
 	wait_queue_head_t *wqp = &per_cpu(rcu_cpu_wq, cpu);
 	char work;
 	char *workp = &per_cpu(rcu_cpu_has_work, cpu);
 
 	for (;;) {
+		*statusp = RCU_KTHREAD_WAITING;
 		wait_event_interruptible(*wqp,
 					 *workp != 0 || kthread_should_stop());
 		local_bh_disable();
@@ -1575,6 +1578,7 @@ static int rcu_cpu_kthread(void *arg)
 			local_bh_enable();
 			break;
 		}
+		*statusp = RCU_KTHREAD_RUNNING;
 		local_irq_save(flags);
 		work = *workp;
 		*workp = 0;
@@ -1587,10 +1591,12 @@ static int rcu_cpu_kthread(void *arg)
 		else
 			spincnt = 0;
 		if (spincnt > 10) {
+			*statusp = RCU_KTHREAD_YIELDING;
 			rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
 			spincnt = 0;
 		}
 	}
+	*statusp = RCU_KTHREAD_STOPPED;
 	return 0;
 }
 
@@ -1637,10 +1643,12 @@ static int rcu_node_kthread(void *arg)
 	struct task_struct *t;
 
 	for (;;) {
+		rnp->node_kthread_status = RCU_KTHREAD_WAITING;
 		wait_event_interruptible(rnp->node_wq, rnp->wakemask != 0 ||
 					 kthread_should_stop());
 		if (kthread_should_stop())
 			break;
+		rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
 		raw_spin_lock_irqsave(&rnp->lock, flags);
 		mask = rnp->wakemask;
 		rnp->wakemask = 0;
@@ -1661,6 +1669,7 @@ static int rcu_node_kthread(void *arg)
 			preempt_enable();
 		}
 	}
+	rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
 	return 0;
 }
 
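The reason rcu_cpu_kthread_status and rcu_cpu_has_work lose their static qualifier (and rcu_node gains node_kthread_status) is so that tracing code outside this file can report the run states. A hypothetical reader of that state, in the spirit of the debugfs output produced elsewhere in the RCU tracing code — the function name and output format below are illustrative assumptions, not taken from this commit — might look like:

#include <linux/percpu.h>
#include <linux/seq_file.h>

DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DECLARE_PER_CPU(char, rcu_cpu_has_work);

/*
 * Illustrative only: dump each CPU's kthread run state and
 * pending-work flag into a seq_file for debugging.
 */
static void print_rcu_cpu_kthread_state(struct seq_file *m)
{
	int cpu;

	for_each_possible_cpu(cpu)
		seq_printf(m, "cpu %d: status=%u has_work=%d\n", cpu,
			   per_cpu(rcu_cpu_kthread_status, cpu),
			   per_cpu(rcu_cpu_has_work, cpu));
}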