Diffstat (limited to 'kernel/rtmutex-debug.c')
-rw-r--r--	kernel/rtmutex-debug.c	107
1 file changed, 40 insertions(+), 67 deletions(-)
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c
index ddabb54bb5c8..e7e6314221ca 100644
--- a/kernel/rtmutex-debug.c
+++ b/kernel/rtmutex-debug.c
@@ -29,61 +29,6 @@
 
 #include "rtmutex_common.h"
 
-# define TRACE_WARN_ON(x)			WARN_ON(x)
-# define TRACE_BUG_ON(x)			BUG_ON(x)
-
-# define TRACE_OFF()						\
-do {								\
-	if (rt_trace_on) {					\
-		rt_trace_on = 0;				\
-		console_verbose();				\
-		if (raw_spin_is_locked(&current->pi_lock))	\
-			raw_spin_unlock(&current->pi_lock);	\
-	}							\
-} while (0)
-
-# define TRACE_OFF_NOLOCK()					\
-do {								\
-	if (rt_trace_on) {					\
-		rt_trace_on = 0;				\
-		console_verbose();				\
-	}							\
-} while (0)
-
-# define TRACE_BUG_LOCKED()			\
-do {						\
-	TRACE_OFF();				\
-	BUG();					\
-} while (0)
-
-# define TRACE_WARN_ON_LOCKED(c)		\
-do {						\
-	if (unlikely(c)) {			\
-		TRACE_OFF();			\
-		WARN_ON(1);			\
-	}					\
-} while (0)
-
-# define TRACE_BUG_ON_LOCKED(c)			\
-do {						\
-	if (unlikely(c))			\
-		TRACE_BUG_LOCKED();		\
-} while (0)
-
-#ifdef CONFIG_SMP
-# define SMP_TRACE_BUG_ON_LOCKED(c)	TRACE_BUG_ON_LOCKED(c)
-#else
-# define SMP_TRACE_BUG_ON_LOCKED(c)	do { } while (0)
-#endif
-
-/*
- * deadlock detection flag. We turn it off when we detect
- * the first problem because we dont want to recurse back
- * into the tracing code when doing error printk or
- * executing a BUG():
- */
-static int rt_trace_on = 1;
-
 static void printk_task(struct task_struct *p)
 {
 	if (p)
@@ -111,8 +56,8 @@ static void printk_lock(struct rt_mutex *lock, int print_owner)
 
 void rt_mutex_debug_task_free(struct task_struct *task)
 {
-	WARN_ON(!plist_head_empty(&task->pi_waiters));
-	WARN_ON(task->pi_blocked_on);
+	DEBUG_LOCKS_WARN_ON(!plist_head_empty(&task->pi_waiters));
+	DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
 }
 
 /*
@@ -125,7 +70,7 @@ void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *act_waiter,
 {
 	struct task_struct *task;
 
-	if (!rt_trace_on || detect || !act_waiter)
+	if (!debug_locks || detect || !act_waiter)
 		return;
 
 	task = rt_mutex_owner(act_waiter->lock);
@@ -139,7 +84,7 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
 {
 	struct task_struct *task;
 
-	if (!waiter->deadlock_lock || !rt_trace_on)
+	if (!waiter->deadlock_lock || !debug_locks)
 		return;
 
 	rcu_read_lock();
@@ -149,7 +94,8 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
 		return;
 	}
 
-	TRACE_OFF_NOLOCK();
+	if (!debug_locks_off())
+		return;
 
 	printk("\n============================================\n");
 	printk( "[ BUG: circular locking deadlock detected! ]\n");
@@ -180,7 +126,6 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
 
 	printk("[ turning off deadlock detection."
 	       "Please report this trace. ]\n\n");
-	local_irq_disable();
 }
 
 void debug_rt_mutex_lock(struct rt_mutex *lock)
@@ -189,7 +134,8 @@ void debug_rt_mutex_lock(struct rt_mutex *lock)
 
 void debug_rt_mutex_unlock(struct rt_mutex *lock)
 {
-	TRACE_WARN_ON_LOCKED(rt_mutex_owner(lock) != current);
+	if (debug_locks)
+		DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current);
 }
 
 void
@@ -199,7 +145,7 @@ debug_rt_mutex_proxy_lock(struct rt_mutex *lock, struct task_struct *powner)
 
 void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock)
 {
-	TRACE_WARN_ON_LOCKED(!rt_mutex_owner(lock));
+	DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock));
 }
 
 void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
@@ -213,9 +159,9 @@ void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
 void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
 {
 	put_pid(waiter->deadlock_task_pid);
-	TRACE_WARN_ON(!plist_node_empty(&waiter->list_entry));
-	TRACE_WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
-	TRACE_WARN_ON(waiter->task);
+	DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->list_entry));
+	DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
+	DEBUG_LOCKS_WARN_ON(waiter->task);
 	memset(waiter, 0x22, sizeof(*waiter));
 }
 
@@ -231,9 +177,36 @@ void debug_rt_mutex_init(struct rt_mutex *lock, const char *name)
 void
 rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task)
 {
+#ifdef CONFIG_DEBUG_PREEMPT
+	if (atomic_read(&task->lock_count) >= MAX_LOCK_STACK) {
+		if (!debug_locks_off())
+			return;
+		printk("BUG: %s/%d: lock count overflow!\n",
+		       task->comm, task->pid);
+		dump_stack();
+		return;
+	}
+#ifdef CONFIG_PREEMPT_RT
+	task->owned_lock[atomic_read(&task->lock_count)] = lock;
+#endif
+	atomic_inc(&task->lock_count);
+#endif
 }
 
 void rt_mutex_deadlock_account_unlock(struct task_struct *task)
 {
+#ifdef CONFIG_DEBUG_PREEMPT
+	if (!atomic_read(&task->lock_count)) {
+		if (!debug_locks_off())
+			return;
+		printk("BUG: %s/%d: lock count underflow!\n",
+		       task->comm, task->pid);
+		dump_stack();
+		return;
+	}
+	atomic_dec(&task->lock_count);
+#ifdef CONFIG_PREEMPT_RT
+	task->owned_lock[atomic_read(&task->lock_count)] = NULL;
+#endif
+#endif
 }
 
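Aside: this patch drops the file-local rt_trace_on flag and the TRACE_* macro family in favor of the generic lock-debugging infrastructure (debug_locks, debug_locks_off() and DEBUG_LOCKS_WARN_ON() from include/linux/debug_locks.h). The property the conversion relies on is that debug_locks_off() clears the global flag atomically and returns non-zero only for the one caller that actually performed the transition, so exactly one task prints a report and every later check stays silent. A minimal userspace C sketch of that once-only pattern follows; it is an illustration under stated assumptions, not the kernel code (the real macro additionally honors debug_locks_silent and oops state):

/*
 * Userspace sketch of the debug_locks "report once, then go quiet"
 * pattern this patch adopts. Illustrative only: the kernel's real
 * implementation lives in lib/debug_locks.c and
 * include/linux/debug_locks.h.
 */
#include <stdio.h>
#include <stdatomic.h>

static atomic_int debug_locks = 1;

/*
 * Atomically clear the flag; returns non-zero only for the caller
 * that actually flipped it, so at most one report is ever printed.
 */
static int debug_locks_off(void)
{
	return atomic_exchange(&debug_locks, 0);
}

static int debug_locks_warn(int cond, const char *expr)
{
	if (cond && debug_locks_off())
		fprintf(stderr, "DEBUG_LOCKS_WARN_ON(%s)\n", expr);
	return cond;
}

#define DEBUG_LOCKS_WARN_ON(c)	debug_locks_warn(!!(c), #c)

int main(void)
{
	int owner_is_current = 0;	/* pretend a check failed */

	DEBUG_LOCKS_WARN_ON(!owner_is_current);	/* warns, disables */
	DEBUG_LOCKS_WARN_ON(!owner_is_current);	/* silent from now on */
	return 0;
}

This is also why debug_rt_mutex_print_deadlock() above can simply bail out with "if (!debug_locks_off()) return;": a second CPU racing into the same path loses the exchange and returns without printing, which replaces the old TRACE_OFF_NOLOCK() plus local_irq_disable() approach.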