author		Linus Torvalds <torvalds@linux-foundation.org>	2013-07-02 19:09:13 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-07-02 19:09:13 -0400
commit		0c46d68d1930c8a58d0f291328b9759da754e599
tree		5cc551f56a86e204d648488c036fc1092fcd22a0 /include
parent		3e42dee676e8cf5adca817b1518b2e99d1c138ff
parent		166989e366ffa66108b2f37b870e66b85b2185ad
Merge branch 'core-mutexes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull WW mutex support from Ingo Molnar:
 "This tree adds support for wound/wait style locks, which the graphics
  guys would like to make use of in the TTM graphics subsystem.

  Wound/wait mutexes are used when multiple lock acquisitions of a
  similar type can be done in an arbitrary order.  The deadlock handling
  used here is called wait/wound in the RDBMS literature: the older task
  waits until it can acquire the contended lock, while the younger task
  needs to back off and drop all the locks it is currently holding,
  i.e. the younger task is wounded.

  See this LWN.net description of W/W mutexes:

      https://lwn.net/Articles/548909/

  The comments there outline specific usecases for this facility (which
  have already been implemented for the DRM tree).

  Also see Documentation/ww-mutex-design.txt for more details."

* 'core-mutexes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking-selftests: Handle unexpected failures more strictly
  mutex: Add more w/w tests to test EDEADLK path handling
  mutex: Add more tests to lib/locking-selftest.c
  mutex: Add w/w tests to lib/locking-selftest.c
  mutex: Add w/w mutex slowpath debugging
  mutex: Add support for wound/wait style locks
  arch: Make __mutex_fastpath_lock_retval return whether fastpath succeeded or not
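[Editor's note: for readers new to the API, the intended usage pattern looks roughly like the sketch below, distilled from the interfaces this merge adds to include/linux/mutex.h (shown further down) and from the design document it references. buf_ww_class, struct buf and lock_bufs() are hypothetical illustrations, not code from this tree.]

	#include <linux/mutex.h>
	#include <linux/errno.h>

	static DEFINE_WW_CLASS(buf_ww_class);	/* one class per locking domain */

	struct buf {
		struct ww_mutex lock;
		/* ... payload ... */
	};

	/*
	 * Lock an arbitrary set of buffers in arbitrary order.  On -EDEADLK
	 * (this task was "wounded") every held lock is dropped and we sleep
	 * on the contended lock before retrying, exactly as the pull message
	 * above describes.
	 */
	static int lock_bufs(struct buf **bufs, int n, struct ww_acquire_ctx *ctx)
	{
		int i, j, contended = -1, ret;

		ww_acquire_init(ctx, &buf_ww_class);
	retry:
		for (i = 0; i < n; i++) {
			if (i == contended)	/* already taken via the slowpath below */
				continue;
			ret = ww_mutex_lock(&bufs[i]->lock, ctx);
			if (ret == -EDEADLK) {
				/* Wounded: we are the younger task, so back off. */
				for (j = 0; j < i; j++)
					ww_mutex_unlock(&bufs[j]->lock);
				if (contended >= i)
					ww_mutex_unlock(&bufs[contended]->lock);
				/* Sleep until the contended lock is free, then retry. */
				contended = i;
				ww_mutex_lock_slow(&bufs[i]->lock, ctx);
				goto retry;
			}
		}
		ww_acquire_done(ctx);	/* optional: marks the end of the acquire phase */
		return 0;
	}

After a successful return the caller uses the buffers, releases each lock with ww_mutex_unlock() and finally calls ww_acquire_fini(ctx). The -EALREADY case never triggers here because the loop skips the lock already taken via the slowpath.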
Diffstat (limited to 'include')
 include/asm-generic/mutex-dec.h  |  10
 include/asm-generic/mutex-null.h |   2
 include/asm-generic/mutex-xchg.h |  10
 include/linux/mutex-debug.h      |   1
 include/linux/mutex.h            | 363
 5 files changed, 372 insertions(+), 14 deletions(-)
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
index f104af7cf437..d4f9fb4e53df 100644
--- a/include/asm-generic/mutex-dec.h
+++ b/include/asm-generic/mutex-dec.h
@@ -28,17 +28,15 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  * from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns.
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
 	if (unlikely(atomic_dec_return(count) < 0))
-		return fail_fn(count);
+		return -1;
 	return 0;
 }
 
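[Editor's note: the interface change above is easiest to appreciate from the caller's side. Instead of handing a fail_fn down into the fastpath, the generic code now branches to the slowpath itself when the fastpath reports -1. A sketch of what a caller looks like after this series, paraphrased from kernel/mutex.c and not part of this diff:]

	int __sched mutex_lock_interruptible(struct mutex *lock)
	{
		int ret;

		might_sleep();
		/* 0: the fastpath took the lock; -1: fall back to the slowpath. */
		ret = __mutex_fastpath_lock_retval(&lock->count);
		if (likely(!ret)) {
			mutex_set_owner(lock);
			return 0;
		}
		return __mutex_lock_interruptible_slowpath(lock);
	}

Per the last commit in the series, the point is to let the caller choose and parameterize the slowpath itself, which is what allows the w/w slowpaths to receive an acquire context and to return -EDEADLK or -EALREADY.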
diff --git a/include/asm-generic/mutex-null.h b/include/asm-generic/mutex-null.h
index e1bbbc72b6a2..61069ed334e2 100644
--- a/include/asm-generic/mutex-null.h
+++ b/include/asm-generic/mutex-null.h
@@ -11,7 +11,7 @@
 #define _ASM_GENERIC_MUTEX_NULL_H
 
 #define __mutex_fastpath_lock(count, fail_fn)		fail_fn(count)
-#define __mutex_fastpath_lock_retval(count, fail_fn)	fail_fn(count)
+#define __mutex_fastpath_lock_retval(count)		(-1)
 #define __mutex_fastpath_unlock(count, fail_fn)	fail_fn(count)
 #define __mutex_fastpath_trylock(count, fail_fn)	fail_fn(count)
 #define __mutex_slowpath_needs_to_unlock()		1
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index c04e0db8a2d6..f169ec064785 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -39,18 +39,16 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  * from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
- * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
 	if (unlikely(atomic_xchg(count, 0) != 1))
 		if (likely(atomic_xchg(count, -1) != 1))
-			return fail_fn(count);
+			return -1;
 	return 0;
 }
 
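[Editor's note: the xchg variant merits a remark, since unlike the dec-based fastpath it needs two exchanges. The same new function again, with editorial annotations; the comments are not part of the patch:]

	static inline int
	__mutex_fastpath_lock_retval(atomic_t *count)
	{
		/* 1 -> 0: if the old value was 1, the lock is now ours. */
		if (unlikely(atomic_xchg(count, 0) != 1))
			/*
			 * Contended: set the count to -1 so the unlock path
			 * takes its slowpath and wakes up waiters.  If this
			 * second xchg happens to observe a 1, the holder
			 * released the lock in the meantime and we own it
			 * after all, so no slowpath trip is needed.
			 */
			if (likely(atomic_xchg(count, -1) != 1))
				return -1;
		return 0;
	}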
diff --git a/include/linux/mutex-debug.h b/include/linux/mutex-debug.h
index 731d77d6e155..4ac8b1977b73 100644
--- a/include/linux/mutex-debug.h
+++ b/include/linux/mutex-debug.h
@@ -3,6 +3,7 @@
 
 #include <linux/linkage.h>
 #include <linux/lockdep.h>
+#include <linux/debug_locks.h>
 
 /*
  * Mutexes - debugging helpers:
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 433da8a1a426..3793ed7feeeb 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -10,6 +10,7 @@
 #ifndef __LINUX_MUTEX_H
 #define __LINUX_MUTEX_H
 
+#include <asm/current.h>
 #include <linux/list.h>
 #include <linux/spinlock_types.h>
 #include <linux/linkage.h>
@@ -77,6 +78,40 @@ struct mutex_waiter {
 #endif
 };
 
+struct ww_class {
+	atomic_long_t stamp;
+	struct lock_class_key acquire_key;
+	struct lock_class_key mutex_key;
+	const char *acquire_name;
+	const char *mutex_name;
+};
+
+struct ww_acquire_ctx {
+	struct task_struct *task;
+	unsigned long stamp;
+	unsigned acquired;
+#ifdef CONFIG_DEBUG_MUTEXES
+	unsigned done_acquire;
+	struct ww_class *ww_class;
+	struct ww_mutex *contending_lock;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+	unsigned deadlock_inject_interval;
+	unsigned deadlock_inject_countdown;
+#endif
+};
+
+struct ww_mutex {
+	struct mutex base;
+	struct ww_acquire_ctx *ctx;
+#ifdef CONFIG_DEBUG_MUTEXES
+	struct ww_class *ww_class;
+#endif
+};
+
 #ifdef CONFIG_DEBUG_MUTEXES
 # include <linux/mutex-debug.h>
 #else
@@ -101,8 +136,11 @@ static inline void mutex_destroy(struct mutex *lock) {}
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define __DEP_MAP_MUTEX_INITIALIZER(lockname)			\
 		, .dep_map = { .name = #lockname }
+# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class)	\
+		, .ww_class = &ww_class
 #else
 # define __DEP_MAP_MUTEX_INITIALIZER(lockname)
+# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class)
 #endif
 
 #define __MUTEX_INITIALIZER(lockname) \
@@ -112,13 +150,49 @@ static inline void mutex_destroy(struct mutex *lock) {}
 		__DEBUG_MUTEX_INITIALIZER(lockname) \
 		__DEP_MAP_MUTEX_INITIALIZER(lockname) }
 
+#define __WW_CLASS_INITIALIZER(ww_class) \
+		{ .stamp = ATOMIC_LONG_INIT(0) \
+		, .acquire_name = #ww_class "_acquire" \
+		, .mutex_name = #ww_class "_mutex" }
+
+#define __WW_MUTEX_INITIALIZER(lockname, class) \
+		{ .base = { \__MUTEX_INITIALIZER(lockname) } \
+		__WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
+
 #define DEFINE_MUTEX(mutexname) \
 	struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
 
+#define DEFINE_WW_CLASS(classname) \
+	struct ww_class classname = __WW_CLASS_INITIALIZER(classname)
+
+#define DEFINE_WW_MUTEX(mutexname, ww_class) \
+	struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class)
+
+
 extern void __mutex_init(struct mutex *lock, const char *name,
 			 struct lock_class_key *key);
 
 /**
+ * ww_mutex_init - initialize the w/w mutex
+ * @lock: the mutex to be initialized
+ * @ww_class: the w/w class the mutex should belong to
+ *
+ * Initialize the w/w mutex to unlocked state and associate it with the given
+ * class.
+ *
+ * It is not allowed to initialize an already locked mutex.
+ */
+static inline void ww_mutex_init(struct ww_mutex *lock,
+				 struct ww_class *ww_class)
+{
+	__mutex_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key);
+	lock->ctx = NULL;
+#ifdef CONFIG_DEBUG_MUTEXES
+	lock->ww_class = ww_class;
+#endif
+}
+
+/**
  * mutex_is_locked - is the mutex locked
  * @lock: the mutex to be queried
  *
@@ -136,6 +210,7 @@ static inline int mutex_is_locked(struct mutex *lock)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
 extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
+
 extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
 					unsigned int subclass);
 extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
@@ -147,7 +222,7 @@ extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
 
 #define mutex_lock_nest_lock(lock, nest_lock)				\
 do {									\
-	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
+	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);		\
 	_mutex_lock_nest_lock(lock, &(nest_lock)->dep_map);		\
 } while (0)
 
@@ -170,6 +245,292 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
  */
 extern int mutex_trylock(struct mutex *lock);
 extern void mutex_unlock(struct mutex *lock);
+
+/**
+ * ww_acquire_init - initialize a w/w acquire context
+ * @ctx: w/w acquire context to initialize
+ * @ww_class: w/w class of the context
+ *
+ * Initializes a context to acquire multiple mutexes of the given w/w class.
+ *
+ * Context-based w/w mutex acquiring can be done in any order whatsoever within
+ * a given lock class. Deadlocks will be detected and handled with the
+ * wait/wound logic.
+ *
+ * Mixing of context-based w/w mutex acquiring and single w/w mutex locking can
+ * result in undetected deadlocks and is therefore forbidden. Mixing different
+ * contexts for the same w/w class when acquiring mutexes can also result in
+ * undetected deadlocks, and is hence also forbidden. Both types of abuse will
+ * be caught by enabling CONFIG_PROVE_LOCKING.
+ *
+ * Nesting of acquire contexts for _different_ w/w classes is possible, subject
+ * to the usual locking rules between different lock classes.
+ *
+ * An acquire context must be released with ww_acquire_fini by the same task
+ * before the memory is freed. It is recommended to allocate the context itself
+ * on the stack.
+ */
+static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
+				   struct ww_class *ww_class)
+{
+	ctx->task = current;
+	ctx->stamp = atomic_long_inc_return(&ww_class->stamp);
+	ctx->acquired = 0;
+#ifdef CONFIG_DEBUG_MUTEXES
+	ctx->ww_class = ww_class;
+	ctx->done_acquire = 0;
+	ctx->contending_lock = NULL;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	debug_check_no_locks_freed((void *)ctx, sizeof(*ctx));
+	lockdep_init_map(&ctx->dep_map, ww_class->acquire_name,
+			 &ww_class->acquire_key, 0);
+	mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_);
+#endif
+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+	ctx->deadlock_inject_interval = 1;
+	ctx->deadlock_inject_countdown = ctx->stamp & 0xf;
+#endif
+}
+
+/**
+ * ww_acquire_done - marks the end of the acquire phase
+ * @ctx: the acquire context
+ *
+ * Marks the end of the acquire phase; any further w/w mutex lock calls using
+ * this context are forbidden.
+ *
+ * Calling this function is optional; it is just useful to document w/w mutex
+ * code and to clearly separate the acquire phase from actually using the
+ * locked data structures.
+ */
+static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
+{
+#ifdef CONFIG_DEBUG_MUTEXES
+	lockdep_assert_held(ctx);
+
+	DEBUG_LOCKS_WARN_ON(ctx->done_acquire);
+	ctx->done_acquire = 1;
+#endif
+}
+
+/**
+ * ww_acquire_fini - releases a w/w acquire context
+ * @ctx: the acquire context to free
+ *
+ * Releases a w/w acquire context. This must be called _after_ all acquired w/w
+ * mutexes have been released with ww_mutex_unlock.
+ */
+static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
+{
+#ifdef CONFIG_DEBUG_MUTEXES
+	mutex_release(&ctx->dep_map, 0, _THIS_IP_);
+
+	DEBUG_LOCKS_WARN_ON(ctx->acquired);
+	if (!config_enabled(CONFIG_PROVE_LOCKING))
+		/*
+		 * lockdep will normally handle this,
+		 * but fail without it anyway
+		 */
+		ctx->done_acquire = 1;
+
+	if (!config_enabled(CONFIG_DEBUG_LOCK_ALLOC))
+		/* ensure ww_acquire_fini will still fail if called twice */
+		ctx->acquired = ~0U;
+#endif
+}
+
+extern int __must_check __ww_mutex_lock(struct ww_mutex *lock,
+					struct ww_acquire_ctx *ctx);
+extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock,
+						      struct ww_acquire_ctx *ctx);
+
+/**
+ * ww_mutex_lock - acquire the w/w mutex
+ * @lock: the mutex to be acquired
+ * @ctx: w/w acquire context, or NULL to acquire only a single lock.
+ *
+ * Lock the w/w mutex exclusively for this task.
+ *
+ * Deadlocks within a given w/w class of locks are detected and handled with
+ * the wait/wound algorithm. If the lock isn't immediately available, this
+ * function will either sleep until it is (the wait case), or select the
+ * current context for backing off by returning -EDEADLK (the wound case).
+ * Trying to acquire the same lock with the same context twice is also detected
+ * and signalled by returning -EALREADY. Returns 0 if the mutex was
+ * successfully acquired.
+ *
+ * In the wound case the caller must release all currently held w/w mutexes for
+ * the given context and then wait for this contending lock to become available
+ * by calling ww_mutex_lock_slow. Alternatively, callers can opt to not acquire
+ * this lock and proceed with trying to acquire further w/w mutexes (e.g. when
+ * scanning through lru lists trying to free resources).
+ *
+ * The mutex must later on be released by the same task that
+ * acquired it. The task may not exit without first unlocking the mutex. Also,
+ * kernel memory where the mutex resides must not be freed with the mutex still
+ * locked. The mutex must first be initialized (or statically defined) before it
+ * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
+ * of the same w/w lock class as was used to initialize the acquire context.
+ *
+ * A mutex acquired with this function must be released with ww_mutex_unlock.
+ */
+static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+	if (ctx)
+		return __ww_mutex_lock(lock, ctx);
+	else {
+		mutex_lock(&lock->base);
+		return 0;
+	}
+}
+
+/**
+ * ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible
+ * @lock: the mutex to be acquired
+ * @ctx: w/w acquire context
+ *
+ * Lock the w/w mutex exclusively for this task.
+ *
+ * Deadlocks within a given w/w class of locks are detected and handled with
+ * the wait/wound algorithm. If the lock isn't immediately available, this
+ * function will either sleep until it is (the wait case), or select the
+ * current context for backing off by returning -EDEADLK (the wound case).
+ * Trying to acquire the same lock with the same context twice is also detected
+ * and signalled by returning -EALREADY. Returns 0 if the mutex was
+ * successfully acquired. If a signal arrives while waiting for the lock then
+ * this function returns -EINTR.
+ *
+ * In the wound case the caller must release all currently held w/w mutexes for
+ * the given context and then wait for this contending lock to become available
+ * by calling ww_mutex_lock_slow_interruptible. Alternatively, callers can opt
+ * to not acquire this lock and proceed with trying to acquire further w/w
+ * mutexes (e.g. when scanning through lru lists trying to free resources).
+ *
+ * The mutex must later on be released by the same task that
+ * acquired it. The task may not exit without first unlocking the mutex. Also,
+ * kernel memory where the mutex resides must not be freed with the mutex still
+ * locked. The mutex must first be initialized (or statically defined) before it
+ * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
+ * of the same w/w lock class as was used to initialize the acquire context.
+ *
+ * A mutex acquired with this function must be released with ww_mutex_unlock.
+ */
+static inline int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
+							   struct ww_acquire_ctx *ctx)
+{
+	if (ctx)
+		return __ww_mutex_lock_interruptible(lock, ctx);
+	else
+		return mutex_lock_interruptible(&lock->base);
+}
+
+/**
+ * ww_mutex_lock_slow - slowpath acquiring of the w/w mutex
+ * @lock: the mutex to be acquired
+ * @ctx: w/w acquire context
+ *
+ * Acquires a w/w mutex with the given context after a wound case. This function
+ * will sleep until the lock becomes available.
+ *
+ * The caller must have released all w/w mutexes already acquired with the
+ * context before calling this function on the contended lock.
+ *
+ * Afterwards the caller may continue to (re)acquire the other w/w mutexes it
+ * needs with ww_mutex_lock. Note that the -EALREADY return code from
+ * ww_mutex_lock can be used to avoid locking this contended mutex twice.
+ *
+ * It is forbidden to call this function with any other w/w mutexes associated
+ * with the context held. It is forbidden to call this on anything other than
+ * the contending mutex.
+ *
+ * Note that the slowpath lock acquiring can also be done by calling
+ * ww_mutex_lock directly. This function here is simply to help w/w mutex
+ * locking code readability by clearly denoting the slowpath.
+ */
+static inline void
+ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+	int ret;
+#ifdef CONFIG_DEBUG_MUTEXES
+	DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
+#endif
+	ret = ww_mutex_lock(lock, ctx);
+	(void)ret;
+}
+
+/**
+ * ww_mutex_lock_slow_interruptible - slowpath acquiring of the w/w mutex,
+ *				      interruptible
+ * @lock: the mutex to be acquired
+ * @ctx: w/w acquire context
+ *
+ * Acquires a w/w mutex with the given context after a wound case. This function
+ * will sleep until the lock becomes available and returns 0 when the lock has
+ * been acquired. If a signal arrives while waiting for the lock then this
+ * function returns -EINTR.
+ *
+ * The caller must have released all w/w mutexes already acquired with the
+ * context before calling this function on the contended lock.
+ *
+ * Afterwards the caller may continue to (re)acquire the other w/w mutexes it
+ * needs with ww_mutex_lock. Note that the -EALREADY return code from
+ * ww_mutex_lock can be used to avoid locking this contended mutex twice.
+ *
+ * It is forbidden to call this function with any other w/w mutexes associated
+ * with the given context held. It is forbidden to call this on anything other
+ * than the contending mutex.
+ *
+ * Note that the slowpath lock acquiring can also be done by calling
+ * ww_mutex_lock_interruptible directly. This function here is simply to help
+ * w/w mutex locking code readability by clearly denoting the slowpath.
+ */
+static inline int __must_check
+ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
+				 struct ww_acquire_ctx *ctx)
+{
+#ifdef CONFIG_DEBUG_MUTEXES
+	DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
+#endif
+	return ww_mutex_lock_interruptible(lock, ctx);
+}
+
+extern void ww_mutex_unlock(struct ww_mutex *lock);
+
+/**
+ * ww_mutex_trylock - tries to acquire the w/w mutex without acquire context
+ * @lock: mutex to lock
+ *
+ * Trylocks a mutex without acquire context, so no deadlock detection is
+ * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
+ */
+static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock)
+{
+	return mutex_trylock(&lock->base);
+}
+
+/**
+ * ww_mutex_destroy - mark a w/w mutex unusable
+ * @lock: the mutex to be destroyed
+ *
+ * This function marks the mutex uninitialized, and any subsequent
+ * use of the mutex is forbidden. The mutex must not be locked when
+ * this function is called.
+ */
+static inline void ww_mutex_destroy(struct ww_mutex *lock)
+{
+	mutex_destroy(&lock->base);
+}
+
+/**
+ * ww_mutex_is_locked - is the w/w mutex locked
+ * @lock: the mutex to be queried
+ *
+ * Returns 1 if the mutex is locked, 0 if unlocked.
+ */
+static inline bool ww_mutex_is_locked(struct ww_mutex *lock)
+{
+	return mutex_is_locked(&lock->base);
+}
+
 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 
 #ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
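[Editor's note: for completeness, a short sketch of how a driver sets up w/w mutexes with the pieces added above. obj_ww_class, struct obj and obj_init() are hypothetical names, not from this merge:]

	#include <linux/mutex.h>

	static DEFINE_WW_CLASS(obj_ww_class);	/* one class per independent locking domain */

	struct obj {
		struct ww_mutex lock;
		/* ... driver payload ... */
	};

	static void obj_init(struct obj *obj)
	{
		/* Runtime initialization; associates the mutex with obj_ww_class. */
		ww_mutex_init(&obj->lock, &obj_ww_class);
	}

All w/w mutexes that may be acquired under one ww_acquire_ctx must belong to the same class, which is why the class, not the individual mutex, carries the stamp counter used to decide which task gets wounded.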