about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorPaul E. McKenney <paul.mckenney@linaro.org>2011-11-01 11:57:21 -0400
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>2011-12-11 13:31:42 -0500
commit0989cb46783188ea7346ba6490be0046b9b7a725 (patch)
treeab3ca7e6099efd9ca814e9202a679388f36ecc90
parent4145fa7fbee3ec1e61c52825b146192885d9759f (diff)
rcu: Add more information to the wrong-idle-task complaint
The current code just complains if the current task is not the idle task.  This commit therefore adds printing of the identity of the idle task.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
-rw-r--r--kernel/rcutiny.c12
-rw-r--r--kernel/rcutree.c12
2 files changed, 20 insertions, 4 deletions
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 9b9bdf666fb..6d70ff71a87 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -65,10 +65,14 @@ static void rcu_idle_enter_common(long long oldval)
 	}
 	RCU_TRACE(trace_rcu_dyntick("Start", oldval, rcu_dynticks_nesting));
 	if (!idle_cpu(smp_processor_id())) {
-		WARN_ON_ONCE(1);	/* must be idle task! */
+		struct task_struct *idle = idle_task(smp_processor_id());
+
 		RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
					    oldval, rcu_dynticks_nesting));
 		ftrace_dump(DUMP_ALL);
+		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
+			  current->pid, current->comm,
+			  idle->pid, idle->comm); /* must be idle task! */
 	}
 	rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
 }
@@ -115,10 +119,14 @@ static void rcu_idle_exit_common(long long oldval)
 	}
 	RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting));
 	if (!idle_cpu(smp_processor_id())) {
-		WARN_ON_ONCE(1);	/* must be idle task! */
+		struct task_struct *idle = idle_task(smp_processor_id());
+
 		RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task",
					    oldval, rcu_dynticks_nesting));
 		ftrace_dump(DUMP_ALL);
+		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
+			  current->pid, current->comm,
+			  idle->pid, idle->comm); /* must be idle task! */
 	}
 }
124 132
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 06e40dd53b2..9888a0ad2d4 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -356,10 +356,14 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
 	}
 	trace_rcu_dyntick("Start", oldval, rdtp->dynticks_nesting);
 	if (!idle_cpu(smp_processor_id())) {
-		WARN_ON_ONCE(1);	/* must be idle task! */
+		struct task_struct *idle = idle_task(smp_processor_id());
+
 		trace_rcu_dyntick("Error on entry: not idle task",
				  oldval, rdtp->dynticks_nesting);
 		ftrace_dump(DUMP_ALL);
+		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
+			  current->pid, current->comm,
+			  idle->pid, idle->comm); /* must be idle task! */
 	}
 	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
 	smp_mb__before_atomic_inc();  /* See above. */
@@ -445,10 +449,14 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
 	trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
 	if (!idle_cpu(smp_processor_id())) {
-		WARN_ON_ONCE(1);	/* must be idle task! */
+		struct task_struct *idle = idle_task(smp_processor_id());
+
 		trace_rcu_dyntick("Error on exit: not idle task",
				  oldval, rdtp->dynticks_nesting);
 		ftrace_dump(DUMP_ALL);
+		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
+			  current->pid, current->comm,
+			  idle->pid, idle->comm); /* must be idle task! */
 	}
 }