author		Thomas Gleixner <tglx@linutronix.de>	2014-05-21 23:25:47 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2014-06-21 16:05:30 -0400
commit		8930ed80f970a90a795239e7415c9b0e6f964649 (patch)
tree		10463885f150543faaaf621e7ff85bbe6a81c911 /kernel
parent		c051b21f71d1ffdfd7ad406a1ef5ede5e5f974c5 (diff)
rtmutex: Cleanup deadlock detector debug logic
The conditions under which deadlock detection is conducted are unclear
and undocumented.
Add constants instead of using 0/1 and provide a selection function
which hides the additional debug dependency from the calling code.
Add comments where needed.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
Link: http://lkml.kernel.org/r/20140522031949.947264874@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/locking/rtmutex-debug.c	 5
-rw-r--r--	kernel/locking/rtmutex-debug.h	 7
-rw-r--r--	kernel/locking/rtmutex.c	77
-rw-r--r--	kernel/locking/rtmutex.h	 7
-rw-r--r--	kernel/locking/rtmutex_common.h	15
5 files changed, 83 insertions(+), 28 deletions(-)
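Before the per-file diffs, it may help to model what the patch establishes: the 0/1 `detect_deadlock` integer threaded through the locking paths becomes an `enum rtmutex_chainwalk`, and all callers funnel through one selection function. The following userspace sketch mirrors the documented semantics; it is not kernel code. The runtime flag `debug_rt_mutexes` and the `main()` harness are hypothetical stand-ins for the build-time CONFIG_DEBUG_RT_MUTEXES split.

/* Userspace model of the selection logic introduced by this patch.
 * The kernel picks one of two debug_rt_mutex_detect_deadlock()
 * variants at build time; a runtime flag stands in for
 * CONFIG_DEBUG_RT_MUTEXES here so both behaviours can be exercised. */
#include <stdbool.h>
#include <stdio.h>

enum rtmutex_chainwalk {
        RT_MUTEX_MIN_CHAINWALK,
        RT_MUTEX_FULL_CHAINWALK,
};

struct rt_mutex_waiter { int stub; };   /* details irrelevant for the model */

static bool debug_rt_mutexes;           /* stand-in for the config option */

static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
                                          enum rtmutex_chainwalk chwalk)
{
        if (debug_rt_mutexes)
                /* Debug variant: any real waiter forces detection;
                 * a NULL waiter marks the deboost path. */
                return waiter != NULL;
        /* Production variant: detect only when a full walk is requested.
         * The deboost callers always pass RT_MUTEX_MIN_CHAINWALK. */
        return chwalk == RT_MUTEX_FULL_CHAINWALK;
}

int main(void)
{
        struct rt_mutex_waiter w;

        debug_rt_mutexes = false;
        printf("prod,  MIN:  %d\n", rt_mutex_cond_detect_deadlock(&w, RT_MUTEX_MIN_CHAINWALK));   /* 0 */
        printf("prod,  FULL: %d\n", rt_mutex_cond_detect_deadlock(&w, RT_MUTEX_FULL_CHAINWALK));  /* 1 */

        debug_rt_mutexes = true;
        printf("debug, MIN:  %d\n", rt_mutex_cond_detect_deadlock(&w, RT_MUTEX_MIN_CHAINWALK));   /* 1 */
        printf("debug, NULL: %d\n", rt_mutex_cond_detect_deadlock(NULL, RT_MUTEX_MIN_CHAINWALK)); /* 0 */
        return 0;
}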
diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c
index 49b2ed3dced8..62b6cee8ea7f 100644
--- a/kernel/locking/rtmutex-debug.c
+++ b/kernel/locking/rtmutex-debug.c
@@ -66,12 +66,13 @@ void rt_mutex_debug_task_free(struct task_struct *task)
  * the deadlock. We print when we return. act_waiter can be NULL in
  * case of a remove waiter operation.
  */
-void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *act_waiter,
+void debug_rt_mutex_deadlock(enum rtmutex_chainwalk chwalk,
+                             struct rt_mutex_waiter *act_waiter,
                              struct rt_mutex *lock)
 {
         struct task_struct *task;

-        if (!debug_locks || detect || !act_waiter)
+        if (!debug_locks || chwalk == RT_MUTEX_FULL_CHAINWALK || !act_waiter)
                 return;

         task = rt_mutex_owner(act_waiter->lock);
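Note that the translated condition is exact: as before, the recorder bails out when a full chain walk was requested. The likely rationale (an inference, not stated in the patch) is that a caller asking for RT_MUTEX_FULL_CHAINWALK handles the resulting -EDEADLK itself, so the debug capture only needs to record deadlocks the caller did not explicitly look for.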
diff --git a/kernel/locking/rtmutex-debug.h b/kernel/locking/rtmutex-debug.h
index ab29b6a22669..d0519c3432b6 100644
--- a/kernel/locking/rtmutex-debug.h
+++ b/kernel/locking/rtmutex-debug.h
@@ -20,14 +20,15 @@ extern void debug_rt_mutex_unlock(struct rt_mutex *lock);
 extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
                                       struct task_struct *powner);
 extern void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock);
-extern void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *waiter,
+extern void debug_rt_mutex_deadlock(enum rtmutex_chainwalk chwalk,
+                                    struct rt_mutex_waiter *waiter,
                                     struct rt_mutex *lock);
 extern void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter);
 # define debug_rt_mutex_reset_waiter(w) \
         do { (w)->deadlock_lock = NULL; } while (0)

-static inline int debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
-                                                 int detect)
+static inline bool debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
+                                                  enum rtmutex_chainwalk walk)
 {
         return (waiter != NULL);
 }
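This is the CONFIG_DEBUG_RT_MUTEXES=y half of the pair: the `walk` argument exists only for type compatibility and is deliberately ignored, so any real waiter triggers a deadlock-detecting chain walk in debug builds. Its CONFIG_DEBUG_RT_MUTEXES=n mirror image, added at the bottom of kernel/locking/rtmutex.h below, ignores the waiter instead and honours only the requested walk type.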
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 32906482edd1..c6ffdaa21b67 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -308,6 +308,32 @@ static void rt_mutex_adjust_prio(struct task_struct *task)
 }

 /*
+ * Deadlock detection is conditional:
+ *
+ * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
+ * if the detect argument is == RT_MUTEX_FULL_CHAINWALK.
+ *
+ * If CONFIG_DEBUG_RT_MUTEXES=y, deadlock detection is always
+ * conducted independent of the detect argument.
+ *
+ * If the waiter argument is NULL this indicates the deboost path and
+ * deadlock detection is disabled independent of the detect argument
+ * and the config settings.
+ */
+static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
+                                          enum rtmutex_chainwalk chwalk)
+{
+        /*
+         * This is just a wrapper function for the following call,
+         * because debug_rt_mutex_detect_deadlock() smells like a magic
+         * debug feature and I wanted to keep the cond function in the
+         * main source file along with the comments instead of having
+         * two of the same in the headers.
+         */
+        return debug_rt_mutex_detect_deadlock(waiter, chwalk);
+}
+
+/*
  * Max number of times we'll walk the boosting chain:
  */
 int max_lock_depth = 1024;
@@ -381,7 +407,7 @@ static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
  * goto again;
  */
 static int rt_mutex_adjust_prio_chain(struct task_struct *task,
-                                      int deadlock_detect,
+                                      enum rtmutex_chainwalk chwalk,
                                       struct rt_mutex *orig_lock,
                                       struct rt_mutex *next_lock,
                                       struct rt_mutex_waiter *orig_waiter,
@@ -389,12 +415,12 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 {
         struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
         struct rt_mutex_waiter *prerequeue_top_waiter;
-        int detect_deadlock, ret = 0, depth = 0;
+        int ret = 0, depth = 0;
         struct rt_mutex *lock;
+        bool detect_deadlock;
         unsigned long flags;

-        detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
-                                                         deadlock_detect);
+        detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);

         /*
          * The (de)boosting is a step by step approach with a lot of
@@ -520,7 +546,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
          * walk, we detected a deadlock.
          */
         if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
-                debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
+                debug_rt_mutex_deadlock(chwalk, orig_waiter, lock);
                 raw_spin_unlock(&lock->wait_lock);
                 ret = -EDEADLK;
                 goto out_unlock_pi;
@@ -784,7 +810,7 @@ takeit:
 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
                                    struct rt_mutex_waiter *waiter,
                                    struct task_struct *task,
-                                   int detect_deadlock)
+                                   enum rtmutex_chainwalk chwalk)
 {
         struct task_struct *owner = rt_mutex_owner(lock);
         struct rt_mutex_waiter *top_waiter = waiter;
@@ -830,7 +856,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
                 __rt_mutex_adjust_prio(owner);
                 if (owner->pi_blocked_on)
                         chain_walk = 1;
-        } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
+        } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
                 chain_walk = 1;
         }

@@ -855,7 +881,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,

         raw_spin_unlock(&lock->wait_lock);

-        res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
+        res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
                                          next_lock, waiter, task);

         raw_spin_lock(&lock->wait_lock);
@@ -960,7 +986,8 @@ static void remove_waiter(struct rt_mutex *lock,

         raw_spin_unlock(&lock->wait_lock);

-        rt_mutex_adjust_prio_chain(owner, 0, lock, next_lock, NULL, current);
+        rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
+                                   next_lock, NULL, current);

         raw_spin_lock(&lock->wait_lock);
 }
@@ -990,7 +1017,8 @@ void rt_mutex_adjust_pi(struct task_struct *task)
         /* gets dropped in rt_mutex_adjust_prio_chain()! */
         get_task_struct(task);

-        rt_mutex_adjust_prio_chain(task, 0, NULL, next_lock, NULL, task);
+        rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
+                                   next_lock, NULL, task);
 }

 /**
@@ -1068,7 +1096,7 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
 static int __sched
 rt_mutex_slowlock(struct rt_mutex *lock, int state,
                   struct hrtimer_sleeper *timeout,
-                  int detect_deadlock)
+                  enum rtmutex_chainwalk chwalk)
 {
         struct rt_mutex_waiter waiter;
         int ret = 0;
@@ -1094,7 +1122,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
                 timeout->task = NULL;
         }

-        ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);
+        ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);

         if (likely(!ret))
                 ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
@@ -1103,7 +1131,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,

         if (unlikely(ret)) {
                 remove_waiter(lock, &waiter);
-                rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
+                rt_mutex_handle_deadlock(ret, chwalk, &waiter);
         }

         /*
@@ -1230,27 +1258,29 @@ static inline int
 rt_mutex_fastlock(struct rt_mutex *lock, int state,
                   int (*slowfn)(struct rt_mutex *lock, int state,
                                 struct hrtimer_sleeper *timeout,
-                                int detect_deadlock))
+                                enum rtmutex_chainwalk chwalk))
 {
         if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                 rt_mutex_deadlock_account_lock(lock, current);
                 return 0;
         } else
-                return slowfn(lock, state, NULL, 0);
+                return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
 }

 static inline int
 rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
-                        struct hrtimer_sleeper *timeout, int detect_deadlock,
+                        struct hrtimer_sleeper *timeout,
+                        enum rtmutex_chainwalk chwalk,
                         int (*slowfn)(struct rt_mutex *lock, int state,
                                       struct hrtimer_sleeper *timeout,
-                                      int detect_deadlock))
+                                      enum rtmutex_chainwalk chwalk))
 {
-        if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
+        if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
+            likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                 rt_mutex_deadlock_account_lock(lock, current);
                 return 0;
         } else
-                return slowfn(lock, state, timeout, detect_deadlock);
+                return slowfn(lock, state, timeout, chwalk);
 }

 static inline int
@@ -1312,7 +1342,8 @@ int rt_mutex_timed_futex_lock(struct rt_mutex *lock,
 {
         might_sleep();

-        return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, 1,
+        return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
+                                       RT_MUTEX_FULL_CHAINWALK,
                                        rt_mutex_slowlock);
 }

@@ -1334,7 +1365,8 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
 {
         might_sleep();

-        return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, 0,
+        return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
+                                       RT_MUTEX_MIN_CHAINWALK,
                                        rt_mutex_slowlock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
@@ -1463,7 +1495,8 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
         }

         /* We enforce deadlock detection for futexes */
-        ret = task_blocks_on_rt_mutex(lock, waiter, task, 1);
+        ret = task_blocks_on_rt_mutex(lock, waiter, task,
+                                      RT_MUTEX_FULL_CHAINWALK);

         if (ret && !rt_mutex_owner(lock)) {
                 /*
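Taken together, the rtmutex.c changes replace an ad-hoc 0/1 flag with an explicit per-call-site policy. Summarizing which walk each path now requests (all names from the diff above):

        rt_mutex_timed_futex_lock()              RT_MUTEX_FULL_CHAINWALK (detection is enforced for futexes)
        rt_mutex_start_proxy_lock()              RT_MUTEX_FULL_CHAINWALK
        rt_mutex_fastlock(), rt_mutex_timed_lock()
                                                 RT_MUTEX_MIN_CHAINWALK
        remove_waiter(), rt_mutex_adjust_pi()    RT_MUTEX_MIN_CHAINWALK, with a NULL waiter (deboost)

The short-circuit in rt_mutex_timed_fastlock() also becomes self-documenting: the cmpxchg fast path is only taken when the caller did not request a full chain walk.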
diff --git a/kernel/locking/rtmutex.h b/kernel/locking/rtmutex.h
index f6a1f3c133b1..c4060584c407 100644
--- a/kernel/locking/rtmutex.h
+++ b/kernel/locking/rtmutex.h
@@ -22,10 +22,15 @@
 #define debug_rt_mutex_init(m, n)                       do { } while (0)
 #define debug_rt_mutex_deadlock(d, a ,l)                do { } while (0)
 #define debug_rt_mutex_print_deadlock(w)                do { } while (0)
-#define debug_rt_mutex_detect_deadlock(w,d)             (d)
 #define debug_rt_mutex_reset_waiter(w)                  do { } while (0)

 static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
 {
         WARN(1, "rtmutex deadlock detected\n");
 }
+
+static inline bool debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *w,
+                                                  enum rtmutex_chainwalk walk)
+{
+        return walk == RT_MUTEX_FULL_CHAINWALK;
+}
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index cd3ec209d0c8..855212501407 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -102,6 +102,21 @@ static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
 }

 /*
+ * Constants for rt mutex functions which have a selectable deadlock
+ * detection.
+ *
+ * RT_MUTEX_MIN_CHAINWALK:      Stops the lock chain walk when there are
+ *                              no further PI adjustments to be made.
+ *
+ * RT_MUTEX_FULL_CHAINWALK:     Invoke deadlock detection with a full
+ *                              walk of the lock chain.
+ */
+enum rtmutex_chainwalk {
+        RT_MUTEX_MIN_CHAINWALK,
+        RT_MUTEX_FULL_CHAINWALK,
+};
+
+/*
  * PI-futex support (proxy locking functions, etc.):
  */
 extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);