path: root/kernel/rcu/update.c
author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2013-10-28 12:22:24 -0400
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2013-12-09 18:12:39 -0500
commit    24ef659a857c3cba40b64ea51ea4fce8d2fb7bbc (patch)
tree      a3059c0605de74b27d67bcfbb58bceabf3dff948 /kernel/rcu/update.c
parent    bc72d962d6a0ba8d9d5314d04fd1775261a9ec79 (diff)
rcu: Provide better diagnostics for blocking in RCU callback functions
Currently, blocking in an RCU callback function results in a "scheduling while atomic" splat, which could be triggered for any number of reasons and therefore does not point back at the real culprit. To aid debugging, this patch introduces an rcu_callback_map lockdep map that ties the inappropriate voluntary context switch back to the fact that the function is being invoked from within an RCU callback.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
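For context, below is a minimal sketch of how such a lockdep map can be consumed; it is not part of this patch, and the helper name invoke_one_rcu_callback is hypothetical. The idea is to bracket callback invocation with a lockdep acquire/release of rcu_callback_map so that lockdep attributes any voluntary context switch inside the callback to the "rcu_callback" context rather than leaving only a bare "scheduling while atomic" report.

#include <linux/lockdep.h>
#include <linux/rcupdate.h>

/*
 * Hypothetical helper (illustration only): pretend to hold
 * rcu_callback_map while an RCU callback runs, so that lockdep can
 * report blocking inside the callback as happening in callback context.
 */
static void invoke_one_rcu_callback(struct rcu_head *head,
				    void (*func)(struct rcu_head *))
{
	rcu_lock_acquire(&rcu_callback_map);	/* enter callback context */
	func(head);				/* callback must not block */
	rcu_lock_release(&rcu_callback_map);	/* leave callback context */
}

With the map held across the invocation, sleep-checking code can consult it (for example via lock_is_held(&rcu_callback_map)) and emit a diagnostic that names the RCU callback context explicitly.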
Diffstat (limited to 'kernel/rcu/update.c')
-rw-r--r--   kernel/rcu/update.c   5
1 file changed, 5 insertions(+), 0 deletions(-)
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 6cb3dff89e2b..802365ccd591 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -128,6 +128,11 @@ struct lockdep_map rcu_sched_lock_map =
 	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
 EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
 
+static struct lock_class_key rcu_callback_key;
+struct lockdep_map rcu_callback_map =
+	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
+EXPORT_SYMBOL_GPL(rcu_callback_map);
+
 int notrace debug_lockdep_rcu_enabled(void)
 {
 	return rcu_scheduler_active && debug_locks &&