diff options

-rw-r--r--   include/linux/lockdep.h |  3
-rw-r--r--   include/linux/mutex.h   |  9
-rw-r--r--   kernel/mutex.c          | 25
3 files changed, 29 insertions(+), 8 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 4aef1dda6406..ef820a3c378b 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -487,12 +487,15 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
 #  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
+#  define mutex_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
 # else
 #  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
+#  define mutex_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, n, i)
 # endif
 # define mutex_release(l, n, i)		lock_release(l, n, i)
 #else
 # define mutex_acquire(l, s, t, i)		do { } while (0)
+# define mutex_acquire_nest(l, s, t, n, i)	do { } while (0)
 # define mutex_release(l, n, i)		do { } while (0)
 #endif
 
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index c75471db576e..a940fe435aca 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -132,6 +132,7 @@ static inline int mutex_is_locked(struct mutex *lock)
  */
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
+extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
 extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
 					unsigned int subclass);
 extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
@@ -140,6 +141,13 @@ extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
 #define mutex_lock(lock) mutex_lock_nested(lock, 0)
 #define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
 #define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
+
+#define mutex_lock_nest_lock(lock, nest_lock)				\
+do {									\
+	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);		\
+	_mutex_lock_nest_lock(lock, &(nest_lock)->dep_map);		\
+} while (0)
+
 #else
 extern void mutex_lock(struct mutex *lock);
 extern int __must_check mutex_lock_interruptible(struct mutex *lock);
@@ -148,6 +156,7 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
 # define mutex_lock_nested(lock, subclass) mutex_lock(lock)
 # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
 # define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
+# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
 #endif
 
 /*
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 2c938e2337cd..d607ed5dd441 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -131,14 +131,14 @@ EXPORT_SYMBOL(mutex_unlock);
  */
 static inline int __sched
 __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
-		    unsigned long ip)
+		    struct lockdep_map *nest_lock, unsigned long ip)
 {
 	struct task_struct *task = current;
 	struct mutex_waiter waiter;
 	unsigned long flags;
 
 	preempt_disable();
-	mutex_acquire(&lock->dep_map, subclass, 0, ip);
+	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 	/*
@@ -269,16 +269,25 @@ void __sched
 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
 
+void __sched
+_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
+{
+	might_sleep();
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
+}
+
+EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
+
 int __sched
 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
-	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
+	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
 }
 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
 
@@ -287,7 +296,7 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
 	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
-				   subclass, _RET_IP_);
+				   subclass, NULL, _RET_IP_);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -393,7 +402,7 @@ __mutex_lock_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
 }
 
 static noinline int __sched
@@ -401,7 +410,7 @@ __mutex_lock_killable_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
-	return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
+	return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
 }
 
 static noinline int __sched
@@ -409,7 +418,7 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
+	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
 }
 #endif
 