path: root/kernel/locking/rtmutex.c
author     Thomas Gleixner <tglx@linutronix.de>    2014-05-21 23:25:47 -0400
committer  Thomas Gleixner <tglx@linutronix.de>    2014-06-21 16:05:30 -0400
commit     8930ed80f970a90a795239e7415c9b0e6f964649 (patch)
tree       10463885f150543faaaf621e7ff85bbe6a81c911 /kernel/locking/rtmutex.c
parent     c051b21f71d1ffdfd7ad406a1ef5ede5e5f974c5 (diff)
rtmutex: Cleanup deadlock detector debug logic

The conditions under which deadlock detection is conducted are unclear
and undocumented.

Add constants instead of using 0/1 and provide a selection function
which hides the additional debug dependency from the calling code.

Add comments where needed.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
Link: http://lkml.kernel.org/r/20140522031949.947264874@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
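For reference, the two constants used throughout the diff below come from
the rtmutex_chainwalk enum. The header change itself is outside this
diffstat-limited view; this is a minimal sketch of the presumed definition,
with the names taken from the diff and the comments mine:

    /*
     * Presumed definition from the rtmutex header (not shown in this
     * diffstat-limited view): the two chain-walk modes that replace the
     * old 0/1 deadlock_detect argument.
     */
    enum rtmutex_chainwalk {
            RT_MUTEX_MIN_CHAINWALK,   /* walk only as far as boosting requires */
            RT_MUTEX_FULL_CHAINWALK,  /* walk the full chain, detect deadlocks */
    };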
Diffstat (limited to 'kernel/locking/rtmutex.c')
-rw-r--r--   kernel/locking/rtmutex.c   77
1 file changed, 55 insertions(+), 22 deletions(-)
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 32906482edd1..c6ffdaa21b67 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -308,6 +308,32 @@ static void rt_mutex_adjust_prio(struct task_struct *task)
 }
 
 /*
+ * Deadlock detection is conditional:
+ *
+ * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
+ * if the detect argument is == RT_MUTEX_FULL_CHAINWALK.
+ *
+ * If CONFIG_DEBUG_RT_MUTEXES=y, deadlock detection is always
+ * conducted independent of the detect argument.
+ *
+ * If the waiter argument is NULL this indicates the deboost path and
+ * deadlock detection is disabled independent of the detect argument
+ * and the config settings.
+ */
+static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
+					  enum rtmutex_chainwalk chwalk)
+{
+	/*
+	 * This is just a wrapper function for the following call,
+	 * because debug_rt_mutex_detect_deadlock() smells like a magic
+	 * debug feature and I wanted to keep the cond function in the
+	 * main source file along with the comments instead of having
+	 * two of the same in the headers.
+	 */
+	return debug_rt_mutex_detect_deadlock(waiter, chwalk);
+}
+
+/*
  * Max number of times we'll walk the boosting chain:
  */
 int max_lock_depth = 1024;
@@ -381,7 +407,7 @@ static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
  * goto again;
  */
 static int rt_mutex_adjust_prio_chain(struct task_struct *task,
-				      int deadlock_detect,
+				      enum rtmutex_chainwalk chwalk,
 				      struct rt_mutex *orig_lock,
 				      struct rt_mutex *next_lock,
 				      struct rt_mutex_waiter *orig_waiter,
@@ -389,12 +415,12 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 {
 	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
 	struct rt_mutex_waiter *prerequeue_top_waiter;
-	int detect_deadlock, ret = 0, depth = 0;
+	int ret = 0, depth = 0;
 	struct rt_mutex *lock;
+	bool detect_deadlock;
 	unsigned long flags;
 
-	detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
-							 deadlock_detect);
+	detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);
 
 	/*
 	 * The (de)boosting is a step by step approach with a lot of
@@ -520,7 +546,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	 * walk, we detected a deadlock.
 	 */
 	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
-		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
+		debug_rt_mutex_deadlock(chwalk, orig_waiter, lock);
 		raw_spin_unlock(&lock->wait_lock);
 		ret = -EDEADLK;
 		goto out_unlock_pi;
@@ -784,7 +810,7 @@ takeit:
 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 				   struct rt_mutex_waiter *waiter,
 				   struct task_struct *task,
-				   int detect_deadlock)
+				   enum rtmutex_chainwalk chwalk)
 {
 	struct task_struct *owner = rt_mutex_owner(lock);
 	struct rt_mutex_waiter *top_waiter = waiter;
@@ -830,7 +856,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 		__rt_mutex_adjust_prio(owner);
 		if (owner->pi_blocked_on)
 			chain_walk = 1;
-	} else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
+	} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
 		chain_walk = 1;
 	}
 
@@ -855,7 +881,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 
 	raw_spin_unlock(&lock->wait_lock);
 
-	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
+	res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
 					 next_lock, waiter, task);
 
 	raw_spin_lock(&lock->wait_lock);
@@ -960,7 +986,8 @@ static void remove_waiter(struct rt_mutex *lock,
 
 	raw_spin_unlock(&lock->wait_lock);
 
-	rt_mutex_adjust_prio_chain(owner, 0, lock, next_lock, NULL, current);
+	rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
+				   next_lock, NULL, current);
 
 	raw_spin_lock(&lock->wait_lock);
 }
@@ -990,7 +1017,8 @@ void rt_mutex_adjust_pi(struct task_struct *task)
 	/* gets dropped in rt_mutex_adjust_prio_chain()! */
 	get_task_struct(task);
 
-	rt_mutex_adjust_prio_chain(task, 0, NULL, next_lock, NULL, task);
+	rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
+				   next_lock, NULL, task);
 }
 
 /**
@@ -1068,7 +1096,7 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
 static int __sched
 rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		  struct hrtimer_sleeper *timeout,
-		  int detect_deadlock)
+		  enum rtmutex_chainwalk chwalk)
 {
 	struct rt_mutex_waiter waiter;
 	int ret = 0;
@@ -1094,7 +1122,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		timeout->task = NULL;
 	}
 
-	ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);
+	ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
 
 	if (likely(!ret))
 		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
@@ -1103,7 +1131,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 
 	if (unlikely(ret)) {
 		remove_waiter(lock, &waiter);
-		rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
+		rt_mutex_handle_deadlock(ret, chwalk, &waiter);
 	}
 
 	/*
@@ -1230,27 +1258,29 @@ static inline int
 rt_mutex_fastlock(struct rt_mutex *lock, int state,
 		  int (*slowfn)(struct rt_mutex *lock, int state,
 				struct hrtimer_sleeper *timeout,
-				int detect_deadlock))
+				enum rtmutex_chainwalk chwalk))
 {
 	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 0;
 	} else
-		return slowfn(lock, state, NULL, 0);
+		return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
 }
 
 static inline int
 rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
-			struct hrtimer_sleeper *timeout, int detect_deadlock,
+			struct hrtimer_sleeper *timeout,
+			enum rtmutex_chainwalk chwalk,
 			int (*slowfn)(struct rt_mutex *lock, int state,
 				      struct hrtimer_sleeper *timeout,
-				      int detect_deadlock))
+				      enum rtmutex_chainwalk chwalk))
 {
-	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
+	if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
+	    likely(rt_mutex_cmpxchg(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 0;
 	} else
-		return slowfn(lock, state, timeout, detect_deadlock);
+		return slowfn(lock, state, timeout, chwalk);
 }
 
 static inline int
@@ -1312,7 +1342,8 @@ int rt_mutex_timed_futex_lock(struct rt_mutex *lock,
 {
 	might_sleep();
 
-	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, 1,
+	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
+				       RT_MUTEX_FULL_CHAINWALK,
 				       rt_mutex_slowlock);
 }
 
@@ -1334,7 +1365,8 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
 {
 	might_sleep();
 
-	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, 0,
+	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
+				       RT_MUTEX_MIN_CHAINWALK,
 				       rt_mutex_slowlock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
@@ -1463,7 +1495,8 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 	}
 
 	/* We enforce deadlock detection for futexes */
-	ret = task_blocks_on_rt_mutex(lock, waiter, task, 1);
+	ret = task_blocks_on_rt_mutex(lock, waiter, task,
+				      RT_MUTEX_FULL_CHAINWALK);
 
 	if (ret && !rt_mutex_owner(lock)) {
 		/*
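The config-dependent behaviour documented above the new
rt_mutex_cond_detect_deadlock() wrapper implies two header-side variants of
debug_rt_mutex_detect_deadlock(). Those headers are not part of this diff;
the following is only a sketch of how they plausibly look, derived from the
comment block in the first hunk:

    /*
     * Sketch only: presumed CONFIG_DEBUG_RT_MUTEXES=n variant. Detection
     * depends solely on the requested chain-walk mode.
     */
    static inline bool
    debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
                                   enum rtmutex_chainwalk walk)
    {
            return walk == RT_MUTEX_FULL_CHAINWALK;
    }

    /*
     * Sketch only: presumed CONFIG_DEBUG_RT_MUTEXES=y variant. Detection
     * is always conducted, except on the deboost path where waiter is
     * NULL.
     */
    static inline bool
    debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
                                   enum rtmutex_chainwalk walk)
    {
            return waiter != NULL;
    }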