author		Maarten Lankhorst <maarten.lankhorst@canonical.com>	2013-07-05 03:29:32 -0400
committer	Ingo Molnar <mingo@kernel.org>	2013-07-12 06:07:46 -0400
commit		1b375dc30710180c4b88cc59caba6e3481ec5c8b (patch)
tree		24b5cfc929db57207afec4633909b8e6755df5f9 /include
parent		2e17c5a97e231f3cb426f4b7895eab5be5c5442e (diff)
mutex: Move ww_mutex definitions to ww_mutex.h
Move the definitions for wound/wait mutexes out to a separate
header, ww_mutex.h. This reduces clutter in mutex.h, and
increases readability.
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Cc: Dave Airlie <airlied@gmail.com>
Link: http://lkml.kernel.org/r/51D675DC.3000907@canonical.com
[ Tidied up the code a bit. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
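
For orientation, the definitions being moved are used along these lines; a minimal sketch, assuming a hypothetical buf_ww_class and struct buf that do not appear in this patch:

	#include <linux/ww_mutex.h>

	/* One w/w class per group of objects that may be locked together. */
	static DEFINE_WW_CLASS(buf_ww_class);

	struct buf {
		struct ww_mutex lock;
		/* ... payload protected by the lock ... */
	};

	static void buf_init(struct buf *buf)
	{
		/* The mutex starts out unlocked, associated with buf_ww_class. */
		ww_mutex_init(&buf->lock, &buf_ww_class);
	}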
Diffstat (limited to 'include')
-rw-r--r--	include/linux/mutex.h		358
-rw-r--r--	include/linux/reservation.h	  2
-rw-r--r--	include/linux/ww_mutex.h	378
3 files changed, 379 insertions(+), 359 deletions(-)
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 3793ed7feeeb..ccd4260834c5 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -78,40 +78,6 @@ struct mutex_waiter {
 #endif
 };
 
-struct ww_class {
-	atomic_long_t stamp;
-	struct lock_class_key acquire_key;
-	struct lock_class_key mutex_key;
-	const char *acquire_name;
-	const char *mutex_name;
-};
-
-struct ww_acquire_ctx {
-	struct task_struct *task;
-	unsigned long stamp;
-	unsigned acquired;
-#ifdef CONFIG_DEBUG_MUTEXES
-	unsigned done_acquire;
-	struct ww_class *ww_class;
-	struct ww_mutex *contending_lock;
-#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lockdep_map dep_map;
-#endif
-#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
-	unsigned deadlock_inject_interval;
-	unsigned deadlock_inject_countdown;
-#endif
-};
-
-struct ww_mutex {
-	struct mutex base;
-	struct ww_acquire_ctx *ctx;
-#ifdef CONFIG_DEBUG_MUTEXES
-	struct ww_class *ww_class;
-#endif
-};
-
 #ifdef CONFIG_DEBUG_MUTEXES
 # include <linux/mutex-debug.h>
 #else
@@ -136,11 +102,8 @@ static inline void mutex_destroy(struct mutex *lock) {}
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
 		, .dep_map = { .name = #lockname }
-# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class) \
-		, .ww_class = &ww_class
 #else
 # define __DEP_MAP_MUTEX_INITIALIZER(lockname)
-# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class)
 #endif
 
 #define __MUTEX_INITIALIZER(lockname) \
@@ -150,49 +113,13 @@ static inline void mutex_destroy(struct mutex *lock) {}
 		__DEBUG_MUTEX_INITIALIZER(lockname) \
 		__DEP_MAP_MUTEX_INITIALIZER(lockname) }
 
-#define __WW_CLASS_INITIALIZER(ww_class) \
-		{ .stamp = ATOMIC_LONG_INIT(0) \
-		, .acquire_name = #ww_class "_acquire" \
-		, .mutex_name = #ww_class "_mutex" }
-
-#define __WW_MUTEX_INITIALIZER(lockname, class) \
-		{ .base = { __MUTEX_INITIALIZER(lockname) } \
-		__WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
-
 #define DEFINE_MUTEX(mutexname) \
 	struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
 
-#define DEFINE_WW_CLASS(classname) \
-	struct ww_class classname = __WW_CLASS_INITIALIZER(classname)
-
-#define DEFINE_WW_MUTEX(mutexname, ww_class) \
-	struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class)
-
-
 extern void __mutex_init(struct mutex *lock, const char *name,
 			 struct lock_class_key *key);
 
 /**
- * ww_mutex_init - initialize the w/w mutex
- * @lock: the mutex to be initialized
- * @ww_class: the w/w class the mutex should belong to
- *
- * Initialize the w/w mutex to the unlocked state and associate it with the
- * given class.
- *
- * It is not allowed to initialize an already locked mutex.
- */
-static inline void ww_mutex_init(struct ww_mutex *lock,
-				 struct ww_class *ww_class)
-{
-	__mutex_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key);
-	lock->ctx = NULL;
-#ifdef CONFIG_DEBUG_MUTEXES
-	lock->ww_class = ww_class;
-#endif
-}
-
-/**
  * mutex_is_locked - is the mutex locked
  * @lock: the mutex to be queried
  *
@@ -246,291 +173,6 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
 extern int mutex_trylock(struct mutex *lock);
 extern void mutex_unlock(struct mutex *lock);
 
-/**
- * ww_acquire_init - initialize a w/w acquire context
- * @ctx: w/w acquire context to initialize
- * @ww_class: w/w class of the context
- *
- * Initializes a context to acquire multiple mutexes of the given w/w class.
- *
- * Context-based w/w mutex acquiring can be done in any order whatsoever within
- * a given lock class. Deadlocks will be detected and handled with the
- * wait/wound logic.
- *
- * Mixing of context-based w/w mutex acquiring and single w/w mutex locking can
- * result in undetected deadlocks and is therefore forbidden. Mixing different
- * contexts for the same w/w class when acquiring mutexes can also result in
- * undetected deadlocks, and is hence also forbidden. Both types of abuse will
- * be caught by enabling CONFIG_PROVE_LOCKING.
- *
- * Nesting of acquire contexts for _different_ w/w classes is possible, subject
- * to the usual locking rules between different lock classes.
- *
- * An acquire context must be released with ww_acquire_fini by the same task
- * before the memory is freed. It is recommended to allocate the context itself
- * on the stack.
- */
-static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
-				   struct ww_class *ww_class)
-{
-	ctx->task = current;
-	ctx->stamp = atomic_long_inc_return(&ww_class->stamp);
-	ctx->acquired = 0;
-#ifdef CONFIG_DEBUG_MUTEXES
-	ctx->ww_class = ww_class;
-	ctx->done_acquire = 0;
-	ctx->contending_lock = NULL;
-#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	debug_check_no_locks_freed((void *)ctx, sizeof(*ctx));
-	lockdep_init_map(&ctx->dep_map, ww_class->acquire_name,
-			 &ww_class->acquire_key, 0);
-	mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_);
-#endif
-#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
-	ctx->deadlock_inject_interval = 1;
-	ctx->deadlock_inject_countdown = ctx->stamp & 0xf;
-#endif
-}
-
-/**
- * ww_acquire_done - marks the end of the acquire phase
- * @ctx: the acquire context
- *
- * Marks the end of the acquire phase; any further w/w mutex lock calls using
- * this context are forbidden.
- *
- * Calling this function is optional. It is just useful to document w/w mutex
- * code and clearly separate the acquire phase from actually using the locked
- * data structures.
- */
-static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
-{
-#ifdef CONFIG_DEBUG_MUTEXES
-	lockdep_assert_held(ctx);
-
-	DEBUG_LOCKS_WARN_ON(ctx->done_acquire);
-	ctx->done_acquire = 1;
-#endif
-}
-
-/**
- * ww_acquire_fini - releases a w/w acquire context
- * @ctx: the acquire context to free
- *
- * Releases a w/w acquire context. This must be called _after_ all acquired w/w
- * mutexes have been released with ww_mutex_unlock.
- */
-static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
-{
-#ifdef CONFIG_DEBUG_MUTEXES
-	mutex_release(&ctx->dep_map, 0, _THIS_IP_);
-
-	DEBUG_LOCKS_WARN_ON(ctx->acquired);
-	if (!config_enabled(CONFIG_PROVE_LOCKING))
-		/*
-		 * lockdep will normally handle this,
-		 * but make it fail even without lockdep
-		 */
-		ctx->done_acquire = 1;
-
-	if (!config_enabled(CONFIG_DEBUG_LOCK_ALLOC))
-		/* ensure ww_acquire_fini will still fail if called twice */
-		ctx->acquired = ~0U;
-#endif
-}
-
-extern int __must_check __ww_mutex_lock(struct ww_mutex *lock,
-					struct ww_acquire_ctx *ctx);
-extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock,
-						      struct ww_acquire_ctx *ctx);
-
-/**
- * ww_mutex_lock - acquire the w/w mutex
- * @lock: the mutex to be acquired
- * @ctx: w/w acquire context, or NULL to acquire only a single lock
- *
- * Lock the w/w mutex exclusively for this task.
- *
- * Deadlocks within a given w/w class of locks are detected and handled with
- * the wait/wound algorithm. If the lock isn't immediately available, this
- * function either sleeps until it is (wait case) or selects the current
- * context for backing off by returning -EDEADLK (wound case). Trying to
- * acquire the same lock with the same context twice is also detected and
- * signalled by returning -EALREADY. Returns 0 if the mutex was acquired.
- *
- * In the wound case the caller must release all currently held w/w mutexes
- * for the given context and then wait for this contending lock to become
- * available by calling ww_mutex_lock_slow. Alternatively callers can opt to
- * not acquire this lock and proceed with trying to acquire further w/w
- * mutexes (e.g. when scanning through lru lists trying to free resources).
- *
- * The mutex must later be released by the same task that acquired it. The
- * task may not exit without first unlocking the mutex. Also, kernel memory
- * where the mutex resides must not be freed with the mutex still locked. The
- * mutex must first be initialized (or statically defined) before it can be
- * locked. memset()-ing the mutex to 0 is not allowed. The mutex must be of
- * the same w/w lock class as was used to initialize the acquire context.
- *
- * A mutex acquired with this function must be released with ww_mutex_unlock.
- */
-static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
-{
-	if (ctx)
-		return __ww_mutex_lock(lock, ctx);
-	else {
-		mutex_lock(&lock->base);
-		return 0;
-	}
-}
-
-/**
- * ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible
- * @lock: the mutex to be acquired
- * @ctx: w/w acquire context
- *
- * Lock the w/w mutex exclusively for this task.
- *
- * Deadlocks within a given w/w class of locks are detected and handled with
- * the wait/wound algorithm. If the lock isn't immediately available, this
- * function either sleeps until it is (wait case) or selects the current
- * context for backing off by returning -EDEADLK (wound case). Trying to
- * acquire the same lock with the same context twice is also detected and
- * signalled by returning -EALREADY. Returns 0 if the mutex was acquired. If
- * a signal arrives while waiting for the lock, this function returns -EINTR.
- *
- * In the wound case the caller must release all currently held w/w mutexes
- * for the given context and then wait for this contending lock to become
- * available by calling ww_mutex_lock_slow_interruptible. Alternatively,
- * callers can opt to not acquire this lock and proceed with trying to acquire
- * further w/w mutexes (e.g. when scanning through lru lists to free resources).
- *
- * The mutex must later be released by the same task that acquired it. The
- * task may not exit without first unlocking the mutex. Also, kernel memory
- * where the mutex resides must not be freed with the mutex still locked. The
- * mutex must first be initialized (or statically defined) before it can be
- * locked. memset()-ing the mutex to 0 is not allowed. The mutex must be of
- * the same w/w lock class as was used to initialize the acquire context.
- *
- * A mutex acquired with this function must be released with ww_mutex_unlock.
- */
-static inline int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
-							   struct ww_acquire_ctx *ctx)
-{
-	if (ctx)
-		return __ww_mutex_lock_interruptible(lock, ctx);
-	else
-		return mutex_lock_interruptible(&lock->base);
-}
-
-/**
- * ww_mutex_lock_slow - slowpath acquiring of the w/w mutex
- * @lock: the mutex to be acquired
- * @ctx: w/w acquire context
- *
- * Acquires a w/w mutex with the given context after a wound case. This
- * function will sleep until the lock becomes available.
- *
- * The caller must have released all w/w mutexes already acquired with the
- * context and then call this function on the contended lock.
- *
- * Afterwards the caller may continue to (re)acquire the other w/w mutexes it
- * needs with ww_mutex_lock. Note that the -EALREADY return code from
- * ww_mutex_lock can be used to avoid locking this contended mutex twice.
- *
- * It is forbidden to call this function with any other w/w mutexes associated
- * with the context held. It is forbidden to call this on anything other than
- * the contending mutex.
- *
- * Note that the slowpath lock acquiring can also be done by calling
- * ww_mutex_lock directly. This function is simply here to help w/w mutex
- * locking code readability by clearly denoting the slowpath.
- */
-static inline void
-ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
-{
-	int ret;
-#ifdef CONFIG_DEBUG_MUTEXES
-	DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
-#endif
-	ret = ww_mutex_lock(lock, ctx);
-	(void)ret;
-}
-
-/**
- * ww_mutex_lock_slow_interruptible - slowpath acquiring of the w/w mutex,
- *				      interruptible
- * @lock: the mutex to be acquired
- * @ctx: w/w acquire context
- *
- * Acquires a w/w mutex with the given context after a wound case. This
- * function will sleep until the lock becomes available and returns 0 when the
- * lock has been acquired. If a signal arrives while waiting for the lock then
- * this function returns -EINTR.
- *
- * The caller must have released all w/w mutexes already acquired with the
- * context and then call this function on the contended lock.
- *
- * Afterwards the caller may continue to (re)acquire the other w/w mutexes it
- * needs with ww_mutex_lock. Note that the -EALREADY return code from
- * ww_mutex_lock can be used to avoid locking this contended mutex twice.
- *
- * It is forbidden to call this function with any other w/w mutexes associated
- * with the given context held. It is forbidden to call this on anything other
- * than the contending mutex.
- *
- * Note that the slowpath lock acquiring can also be done by calling
- * ww_mutex_lock_interruptible directly. This function is simply here to help
- * w/w mutex locking code readability by clearly denoting the slowpath.
- */
-static inline int __must_check
-ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
-				 struct ww_acquire_ctx *ctx)
-{
-#ifdef CONFIG_DEBUG_MUTEXES
-	DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
-#endif
-	return ww_mutex_lock_interruptible(lock, ctx);
-}
-
-extern void ww_mutex_unlock(struct ww_mutex *lock);
-
-/**
- * ww_mutex_trylock - tries to acquire the w/w mutex without acquire context
- * @lock: mutex to lock
- *
- * Trylocks a mutex without acquire context, so no deadlock detection is
- * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
- */
-static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock)
-{
-	return mutex_trylock(&lock->base);
-}
-
-/**
- * ww_mutex_destroy - mark a w/w mutex unusable
- * @lock: the mutex to be destroyed
- *
- * This function marks the mutex uninitialized, and any subsequent
- * use of the mutex is forbidden. The mutex must not be locked when
- * this function is called.
- */
-static inline void ww_mutex_destroy(struct ww_mutex *lock)
-{
-	mutex_destroy(&lock->base);
-}
-
-/**
- * ww_mutex_is_locked - is the w/w mutex locked
- * @lock: the mutex to be queried
- *
- * Returns 1 if the mutex is locked, 0 if unlocked.
- */
-static inline bool ww_mutex_is_locked(struct ww_mutex *lock)
-{
-	return mutex_is_locked(&lock->base);
-}
-
 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 
 #ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index e9ee806a9d72..813dae960ebd 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -39,7 +39,7 @@
 #ifndef _LINUX_RESERVATION_H
 #define _LINUX_RESERVATION_H
 
-#include <linux/mutex.h>
+#include <linux/ww_mutex.h>
 
 extern struct ww_class reservation_ww_class;
 
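
reservation.h is the one in-tree user touched by the move: it declares a struct ww_class, which now lives in the new header, so it includes ww_mutex.h directly instead of getting the definitions via mutex.h. For context, such a user embeds a w/w mutex in its object and initializes it against the class declared above; a rough sketch along the lines of the reservation code (member layout illustrative, not part of this patch):

	struct reservation_object {
		struct ww_mutex lock;
	};

	static inline void
	reservation_object_init(struct reservation_object *obj)
	{
		ww_mutex_init(&obj->lock, &reservation_ww_class);
	}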
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
new file mode 100644
index 000000000000..760399a470bd
--- /dev/null
+++ b/include/linux/ww_mutex.h
@@ -0,0 +1,378 @@
+/*
+ * Wound/Wait Mutexes: blocking mutual exclusion locks with deadlock avoidance
+ *
+ * Original mutex implementation started by Ingo Molnar:
+ *
+ *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * Wound/wait implementation:
+ *  Copyright (C) 2013 Canonical Ltd.
+ *
+ * This file contains the main data structure and API definitions.
+ */
+
+#ifndef __LINUX_WW_MUTEX_H
+#define __LINUX_WW_MUTEX_H
+
+#include <linux/mutex.h>
+
+struct ww_class {
+	atomic_long_t stamp;
+	struct lock_class_key acquire_key;
+	struct lock_class_key mutex_key;
+	const char *acquire_name;
+	const char *mutex_name;
+};
+
+struct ww_acquire_ctx {
+	struct task_struct *task;
+	unsigned long stamp;
+	unsigned acquired;
+#ifdef CONFIG_DEBUG_MUTEXES
+	unsigned done_acquire;
+	struct ww_class *ww_class;
+	struct ww_mutex *contending_lock;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+	unsigned deadlock_inject_interval;
+	unsigned deadlock_inject_countdown;
+#endif
+};
+
+struct ww_mutex {
+	struct mutex base;
+	struct ww_acquire_ctx *ctx;
+#ifdef CONFIG_DEBUG_MUTEXES
+	struct ww_class *ww_class;
+#endif
+};
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class) \
+		, .ww_class = &ww_class
+#else
+# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class)
+#endif
+
+#define __WW_CLASS_INITIALIZER(ww_class) \
+		{ .stamp = ATOMIC_LONG_INIT(0) \
+		, .acquire_name = #ww_class "_acquire" \
+		, .mutex_name = #ww_class "_mutex" }
+
+#define __WW_MUTEX_INITIALIZER(lockname, class) \
+		{ .base = { __MUTEX_INITIALIZER(lockname) } \
+		__WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
+
+#define DEFINE_WW_CLASS(classname) \
+	struct ww_class classname = __WW_CLASS_INITIALIZER(classname)
+
+#define DEFINE_WW_MUTEX(mutexname, ww_class) \
+	struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class)
+
+/**
+ * ww_mutex_init - initialize the w/w mutex
+ * @lock: the mutex to be initialized
+ * @ww_class: the w/w class the mutex should belong to
+ *
+ * Initialize the w/w mutex to the unlocked state and associate it with the
+ * given class.
+ *
+ * It is not allowed to initialize an already locked mutex.
+ */
+static inline void ww_mutex_init(struct ww_mutex *lock,
+				 struct ww_class *ww_class)
+{
+	__mutex_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key);
+	lock->ctx = NULL;
+#ifdef CONFIG_DEBUG_MUTEXES
+	lock->ww_class = ww_class;
+#endif
+}
+
+/**
+ * ww_acquire_init - initialize a w/w acquire context
+ * @ctx: w/w acquire context to initialize
+ * @ww_class: w/w class of the context
+ *
+ * Initializes a context to acquire multiple mutexes of the given w/w class.
+ *
+ * Context-based w/w mutex acquiring can be done in any order whatsoever within
+ * a given lock class. Deadlocks will be detected and handled with the
+ * wait/wound logic.
+ *
+ * Mixing of context-based w/w mutex acquiring and single w/w mutex locking can
+ * result in undetected deadlocks and is therefore forbidden. Mixing different
+ * contexts for the same w/w class when acquiring mutexes can also result in
+ * undetected deadlocks, and is hence also forbidden. Both types of abuse will
+ * be caught by enabling CONFIG_PROVE_LOCKING.
+ *
+ * Nesting of acquire contexts for _different_ w/w classes is possible, subject
+ * to the usual locking rules between different lock classes.
+ *
+ * An acquire context must be released with ww_acquire_fini by the same task
+ * before the memory is freed. It is recommended to allocate the context itself
+ * on the stack.
+ */
+static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
+				   struct ww_class *ww_class)
+{
+	ctx->task = current;
+	ctx->stamp = atomic_long_inc_return(&ww_class->stamp);
+	ctx->acquired = 0;
+#ifdef CONFIG_DEBUG_MUTEXES
+	ctx->ww_class = ww_class;
+	ctx->done_acquire = 0;
+	ctx->contending_lock = NULL;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	debug_check_no_locks_freed((void *)ctx, sizeof(*ctx));
+	lockdep_init_map(&ctx->dep_map, ww_class->acquire_name,
+			 &ww_class->acquire_key, 0);
+	mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_);
+#endif
+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+	ctx->deadlock_inject_interval = 1;
+	ctx->deadlock_inject_countdown = ctx->stamp & 0xf;
+#endif
+}
+
+/**
+ * ww_acquire_done - marks the end of the acquire phase
+ * @ctx: the acquire context
+ *
+ * Marks the end of the acquire phase; any further w/w mutex lock calls using
+ * this context are forbidden.
+ *
+ * Calling this function is optional. It is just useful to document w/w mutex
+ * code and clearly separate the acquire phase from actually using the locked
+ * data structures.
+ */
+static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
+{
+#ifdef CONFIG_DEBUG_MUTEXES
+	lockdep_assert_held(ctx);
+
+	DEBUG_LOCKS_WARN_ON(ctx->done_acquire);
+	ctx->done_acquire = 1;
+#endif
+}
+
+/**
+ * ww_acquire_fini - releases a w/w acquire context
+ * @ctx: the acquire context to free
+ *
+ * Releases a w/w acquire context. This must be called _after_ all acquired w/w
+ * mutexes have been released with ww_mutex_unlock.
+ */
+static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
+{
+#ifdef CONFIG_DEBUG_MUTEXES
+	mutex_release(&ctx->dep_map, 0, _THIS_IP_);
+
+	DEBUG_LOCKS_WARN_ON(ctx->acquired);
+	if (!config_enabled(CONFIG_PROVE_LOCKING))
+		/*
+		 * lockdep will normally handle this,
+		 * but make it fail even without lockdep
+		 */
+		ctx->done_acquire = 1;
+
+	if (!config_enabled(CONFIG_DEBUG_LOCK_ALLOC))
+		/* ensure ww_acquire_fini will still fail if called twice */
+		ctx->acquired = ~0U;
+#endif
+}
+
+extern int __must_check __ww_mutex_lock(struct ww_mutex *lock,
+					struct ww_acquire_ctx *ctx);
+extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock,
+						      struct ww_acquire_ctx *ctx);
+
+/**
+ * ww_mutex_lock - acquire the w/w mutex
+ * @lock: the mutex to be acquired
+ * @ctx: w/w acquire context, or NULL to acquire only a single lock
+ *
+ * Lock the w/w mutex exclusively for this task.
+ *
+ * Deadlocks within a given w/w class of locks are detected and handled with
+ * the wait/wound algorithm. If the lock isn't immediately available, this
+ * function either sleeps until it is (wait case) or selects the current
+ * context for backing off by returning -EDEADLK (wound case). Trying to
+ * acquire the same lock with the same context twice is also detected and
+ * signalled by returning -EALREADY. Returns 0 if the mutex was acquired.
+ *
+ * In the wound case the caller must release all currently held w/w mutexes
+ * for the given context and then wait for this contending lock to become
+ * available by calling ww_mutex_lock_slow. Alternatively callers can opt to
+ * not acquire this lock and proceed with trying to acquire further w/w
+ * mutexes (e.g. when scanning through lru lists trying to free resources).
+ *
+ * The mutex must later be released by the same task that acquired it. The
+ * task may not exit without first unlocking the mutex. Also, kernel memory
+ * where the mutex resides must not be freed with the mutex still locked. The
+ * mutex must first be initialized (or statically defined) before it can be
+ * locked. memset()-ing the mutex to 0 is not allowed. The mutex must be of
+ * the same w/w lock class as was used to initialize the acquire context.
+ *
+ * A mutex acquired with this function must be released with ww_mutex_unlock.
+ */
+static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+	if (ctx)
+		return __ww_mutex_lock(lock, ctx);
+
+	mutex_lock(&lock->base);
+	return 0;
+}
+
+/**
+ * ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible
+ * @lock: the mutex to be acquired
+ * @ctx: w/w acquire context
+ *
+ * Lock the w/w mutex exclusively for this task.
+ *
+ * Deadlocks within a given w/w class of locks are detected and handled with
+ * the wait/wound algorithm. If the lock isn't immediately available, this
+ * function either sleeps until it is (wait case) or selects the current
+ * context for backing off by returning -EDEADLK (wound case). Trying to
+ * acquire the same lock with the same context twice is also detected and
+ * signalled by returning -EALREADY. Returns 0 if the mutex was acquired. If
+ * a signal arrives while waiting for the lock, this function returns -EINTR.
+ *
+ * In the wound case the caller must release all currently held w/w mutexes
+ * for the given context and then wait for this contending lock to become
+ * available by calling ww_mutex_lock_slow_interruptible. Alternatively,
+ * callers can opt to not acquire this lock and proceed with trying to acquire
+ * further w/w mutexes (e.g. when scanning through lru lists to free resources).
+ *
+ * The mutex must later be released by the same task that acquired it. The
+ * task may not exit without first unlocking the mutex. Also, kernel memory
+ * where the mutex resides must not be freed with the mutex still locked. The
+ * mutex must first be initialized (or statically defined) before it can be
+ * locked. memset()-ing the mutex to 0 is not allowed. The mutex must be of
+ * the same w/w lock class as was used to initialize the acquire context.
+ *
+ * A mutex acquired with this function must be released with ww_mutex_unlock.
+ */
+static inline int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
+							   struct ww_acquire_ctx *ctx)
+{
+	if (ctx)
+		return __ww_mutex_lock_interruptible(lock, ctx);
+	else
+		return mutex_lock_interruptible(&lock->base);
+}
+
+/**
+ * ww_mutex_lock_slow - slowpath acquiring of the w/w mutex
+ * @lock: the mutex to be acquired
+ * @ctx: w/w acquire context
+ *
+ * Acquires a w/w mutex with the given context after a wound case. This
+ * function will sleep until the lock becomes available.
+ *
+ * The caller must have released all w/w mutexes already acquired with the
+ * context and then call this function on the contended lock.
+ *
+ * Afterwards the caller may continue to (re)acquire the other w/w mutexes it
+ * needs with ww_mutex_lock. Note that the -EALREADY return code from
+ * ww_mutex_lock can be used to avoid locking this contended mutex twice.
+ *
+ * It is forbidden to call this function with any other w/w mutexes associated
+ * with the context held. It is forbidden to call this on anything other than
+ * the contending mutex.
+ *
+ * Note that the slowpath lock acquiring can also be done by calling
+ * ww_mutex_lock directly. This function is simply here to help w/w mutex
+ * locking code readability by clearly denoting the slowpath.
+ */
+static inline void
+ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+	int ret;
+#ifdef CONFIG_DEBUG_MUTEXES
+	DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
+#endif
+	ret = ww_mutex_lock(lock, ctx);
+	(void)ret;
+}
+
+/**
+ * ww_mutex_lock_slow_interruptible - slowpath acquiring of the w/w mutex, interruptible
+ * @lock: the mutex to be acquired
+ * @ctx: w/w acquire context
+ *
+ * Acquires a w/w mutex with the given context after a wound case. This
+ * function will sleep until the lock becomes available and returns 0 when the
+ * lock has been acquired. If a signal arrives while waiting for the lock then
+ * this function returns -EINTR.
+ *
+ * The caller must have released all w/w mutexes already acquired with the
+ * context and then call this function on the contended lock.
+ *
+ * Afterwards the caller may continue to (re)acquire the other w/w mutexes it
+ * needs with ww_mutex_lock. Note that the -EALREADY return code from
+ * ww_mutex_lock can be used to avoid locking this contended mutex twice.
+ *
+ * It is forbidden to call this function with any other w/w mutexes associated
+ * with the given context held. It is forbidden to call this on anything other
+ * than the contending mutex.
+ *
+ * Note that the slowpath lock acquiring can also be done by calling
+ * ww_mutex_lock_interruptible directly. This function is simply here to help
+ * w/w mutex locking code readability by clearly denoting the slowpath.
+ */
+static inline int __must_check
+ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
+				 struct ww_acquire_ctx *ctx)
+{
+#ifdef CONFIG_DEBUG_MUTEXES
+	DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
+#endif
+	return ww_mutex_lock_interruptible(lock, ctx);
+}
+
+extern void ww_mutex_unlock(struct ww_mutex *lock);
+
+/**
+ * ww_mutex_trylock - tries to acquire the w/w mutex without acquire context
+ * @lock: mutex to lock
+ *
+ * Trylocks a mutex without acquire context, so no deadlock detection is
+ * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
+ */
+static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock)
+{
+	return mutex_trylock(&lock->base);
+}
+
+/**
+ * ww_mutex_destroy - mark a w/w mutex unusable
+ * @lock: the mutex to be destroyed
+ *
+ * This function marks the mutex uninitialized, and any subsequent
+ * use of the mutex is forbidden. The mutex must not be locked when
+ * this function is called.
+ */
+static inline void ww_mutex_destroy(struct ww_mutex *lock)
+{
+	mutex_destroy(&lock->base);
+}
+
+/**
+ * ww_mutex_is_locked - is the w/w mutex locked
+ * @lock: the mutex to be queried
+ *
+ * Returns 1 if the mutex is locked, 0 if unlocked.
+ */
+static inline bool ww_mutex_is_locked(struct ww_mutex *lock)
+{
+	return mutex_is_locked(&lock->base);
+}
+
+#endif
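
The kernel-doc above spells out the wound/backoff protocol but never shows it end to end. A minimal sketch of locking two objects of one class, reusing the hypothetical struct buf and buf_ww_class from the sketch near the top of this page; it illustrates the documented protocol and is not code from this patch:

	static void buf_lock_pair(struct buf *a, struct buf *b,
				  struct ww_acquire_ctx *ctx)
	{
		int ret;

		ww_acquire_init(ctx, &buf_ww_class);

		ret = ww_mutex_lock(&a->lock, ctx);
		if (ret == -EDEADLK) {
			/* Nothing else is held yet: just wait on the slowpath. */
			ww_mutex_lock_slow(&a->lock, ctx);
		}

		ret = ww_mutex_lock(&b->lock, ctx);
		while (ret == -EDEADLK) {
			/*
			 * Wounded: drop the lock we hold, sleep until the
			 * contended lock is free, then retry with it held.
			 */
			ww_mutex_unlock(&a->lock);
			ww_mutex_lock_slow(&b->lock, ctx);
			swap(a, b);	/* local pointers only; b is now the unheld one */
			ret = ww_mutex_lock(&b->lock, ctx);
		}

		ww_acquire_done(ctx);	/* optional: marks the end of the acquire phase */
	}

The caller later releases both locks with ww_mutex_unlock() and only then releases the context with ww_acquire_fini(). The swap at the backoff step keeps the invariant that a is the held lock and b the one still being attempted, so the loop cannot trip the -EALREADY detection.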