Diffstat (limited to 'include/linux/ww_mutex.h')
-rw-r--r--   include/linux/ww_mutex.h   45
1 file changed, 28 insertions(+), 17 deletions(-)
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 39fda195bf78..3af7c0e03be5 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -6,8 +6,10 @@
  *
  * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  *
- * Wound/wait implementation:
+ * Wait/Die implementation:
  *  Copyright (C) 2013 Canonical Ltd.
+ * Choice of algorithm:
+ *  Copyright (C) 2018 WMWare Inc.
  *
  * This file contains the main data structure and API definitions.
  */
@@ -23,14 +25,17 @@ struct ww_class {
 	struct lock_class_key mutex_key;
 	const char *acquire_name;
 	const char *mutex_name;
+	unsigned int is_wait_die;
 };
 
 struct ww_acquire_ctx {
 	struct task_struct *task;
 	unsigned long stamp;
-	unsigned acquired;
+	unsigned int acquired;
+	unsigned short wounded;
+	unsigned short is_wait_die;
 #ifdef CONFIG_DEBUG_MUTEXES
-	unsigned done_acquire;
+	unsigned int done_acquire;
 	struct ww_class *ww_class;
 	struct ww_mutex *contending_lock;
 #endif
@@ -38,8 +43,8 @@ struct ww_acquire_ctx {
 	struct lockdep_map dep_map;
 #endif
 #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
-	unsigned deadlock_inject_interval;
-	unsigned deadlock_inject_countdown;
+	unsigned int deadlock_inject_interval;
+	unsigned int deadlock_inject_countdown;
 #endif
 };
 
@@ -58,17 +63,21 @@ struct ww_mutex {
 # define __WW_CLASS_MUTEX_INITIALIZER(lockname, class)
 #endif
 
-#define __WW_CLASS_INITIALIZER(ww_class) \
+#define __WW_CLASS_INITIALIZER(ww_class, _is_wait_die) \
 		{ .stamp = ATOMIC_LONG_INIT(0) \
 		, .acquire_name = #ww_class "_acquire" \
-		, .mutex_name = #ww_class "_mutex" }
+		, .mutex_name = #ww_class "_mutex" \
+		, .is_wait_die = _is_wait_die }
 
 #define __WW_MUTEX_INITIALIZER(lockname, class) \
 		{ .base = __MUTEX_INITIALIZER(lockname.base) \
 		__WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
 
+#define DEFINE_WD_CLASS(classname) \
+	struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 1)
+
 #define DEFINE_WW_CLASS(classname) \
-	struct ww_class classname = __WW_CLASS_INITIALIZER(classname)
+	struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 0)
 
 #define DEFINE_WW_MUTEX(mutexname, ww_class) \
 	struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class)
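
The hunk above makes the algorithm choice part of the class definition: per the new initializers, DEFINE_WD_CLASS() passes is_wait_die = 1 (Wait-Die) and DEFINE_WW_CLASS() passes 0 (Wound-Wait). A minimal usage sketch, using made-up demo_* identifiers that are not part of this patch:

/* Illustrative only; the demo_* names are hypothetical. */
#include <linux/ww_mutex.h>

static DEFINE_WD_CLASS(demo_wd_class);	/* is_wait_die = 1: Wait-Die */
static DEFINE_WW_CLASS(demo_ww_class);	/* is_wait_die = 0: Wound-Wait */

/* Static initialization binds a mutex to its class at build time ... */
static DEFINE_WW_MUTEX(demo_lock_a, demo_ww_class);

/* ... or the same can be done dynamically. */
static struct ww_mutex demo_lock_b;

static void demo_setup(void)
{
	ww_mutex_init(&demo_lock_b, &demo_ww_class);
}
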
@@ -102,7 +111,7 @@ static inline void ww_mutex_init(struct ww_mutex *lock,
  *
  * Context-based w/w mutex acquiring can be done in any order whatsoever within
  * a given lock class. Deadlocks will be detected and handled with the
- * wait/wound logic.
+ * wait/die logic.
  *
  * Mixing of context-based w/w mutex acquiring and single w/w mutex locking can
  * result in undetected deadlocks and is so forbidden. Mixing different contexts
@@ -123,6 +132,8 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
 	ctx->task = current;
 	ctx->stamp = atomic_long_inc_return_relaxed(&ww_class->stamp);
 	ctx->acquired = 0;
+	ctx->wounded = false;
+	ctx->is_wait_die = ww_class->is_wait_die;
 #ifdef CONFIG_DEBUG_MUTEXES
 	ctx->ww_class = ww_class;
 	ctx->done_acquire = 0;
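
ww_acquire_init() is where the new per-context state is set up: the wounded flag starts out clear and the class's algorithm choice is copied into the context. A rough lifecycle sketch, reusing the hypothetical demo_ww_class from above (back-off handling of the individual locks is shown in the next example):

static void demo_ctx_lifecycle(void)
{
	struct ww_acquire_ctx ctx;

	/* Clears ctx.wounded and latches demo_ww_class.is_wait_die. */
	ww_acquire_init(&ctx, &demo_ww_class);

	/* ... acquire any number of demo_ww_class mutexes with &ctx ... */

	ww_acquire_done(&ctx);	/* no further locks will be added to this ctx */

	/* ... use the protected data, then unlock all the mutexes ... */

	ww_acquire_fini(&ctx);
}
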
@@ -195,13 +206,13 @@ static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
  * Lock the w/w mutex exclusively for this task.
  *
  * Deadlocks within a given w/w class of locks are detected and handled with the
- * wait/wound algorithm. If the lock isn't immediately avaiable this function
+ * wait/die algorithm. If the lock isn't immediately available this function
  * will either sleep until it is (wait case). Or it selects the current context
- * for backing off by returning -EDEADLK (wound case). Trying to acquire the
+ * for backing off by returning -EDEADLK (die case). Trying to acquire the
  * same lock with the same context twice is also detected and signalled by
  * returning -EALREADY. Returns 0 if the mutex was successfully acquired.
  *
- * In the wound case the caller must release all currently held w/w mutexes for
+ * In the die case the caller must release all currently held w/w mutexes for
  * the given context and then wait for this contending lock to be available by
  * calling ww_mutex_lock_slow. Alternatively callers can opt to not acquire this
  * lock and proceed with trying to acquire further w/w mutexes (e.g. when
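
The back-off protocol described in this comment can be sketched for two mutexes as follows. demo_lock_pair() is a hypothetical helper, not part of this patch, and assumes a and b are distinct and not yet locked in the given context; on -EDEADLK everything held in the context is dropped, ww_mutex_lock_slow() waits for the contended lock, and the sequence is retried:

static int demo_lock_pair(struct ww_mutex *a, struct ww_mutex *b,
			  struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *tmp;
	int ret;

retry:
	ret = ww_mutex_lock(a, ctx);
	if (ret && ret != -EALREADY)
		return ret;

	ret = ww_mutex_lock(b, ctx);
	if (ret == -EDEADLK) {
		/* Die case: drop everything held in this context ... */
		ww_mutex_unlock(a);
		/* ... then sleep on the contended lock with nothing held. */
		ww_mutex_lock_slow(b, ctx);
		/* b is held now; swap the roles and redo the sequence. */
		tmp = a;
		a = b;
		b = tmp;
		goto retry;
	}

	return ret;
}

After the swap, re-locking the mutex just taken via the slow path returns -EALREADY, which is why that value is tolerated on the first lock.
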
@@ -226,14 +237,14 @@ extern int /* __must_check */ ww_mutex_lock(struct ww_mutex *lock, struct ww_acq
  * Lock the w/w mutex exclusively for this task.
  *
  * Deadlocks within a given w/w class of locks are detected and handled with the
- * wait/wound algorithm. If the lock isn't immediately avaiable this function
+ * wait/die algorithm. If the lock isn't immediately available this function
  * will either sleep until it is (wait case). Or it selects the current context
- * for backing off by returning -EDEADLK (wound case). Trying to acquire the
+ * for backing off by returning -EDEADLK (die case). Trying to acquire the
  * same lock with the same context twice is also detected and signalled by
  * returning -EALREADY. Returns 0 if the mutex was successfully acquired. If a
  * signal arrives while waiting for the lock then this function returns -EINTR.
  *
- * In the wound case the caller must release all currently held w/w mutexes for
+ * In the die case the caller must release all currently held w/w mutexes for
  * the given context and then wait for this contending lock to be available by
  * calling ww_mutex_lock_slow_interruptible. Alternatively callers can opt to
  * not acquire this lock and proceed with trying to acquire further w/w mutexes
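
The interruptible variants follow the same back-off shape; the difference, per the comment above, is that both the fast path and the slow path can additionally fail with -EINTR, which has to be passed up after dropping every lock this helper has taken. A hedged sketch mirroring the hypothetical demo_lock_pair() above, under the same assumptions:

static int demo_lock_pair_interruptible(struct ww_mutex *a, struct ww_mutex *b,
					struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *tmp;
	int ret;

retry:
	ret = ww_mutex_lock_interruptible(a, ctx);
	if (ret && ret != -EALREADY)
		return ret;			/* e.g. -EINTR; nothing taken by this helper yet */

	ret = ww_mutex_lock_interruptible(b, ctx);
	if (ret == -EDEADLK) {
		/* Die case as before, but the slow path may be interrupted too. */
		ww_mutex_unlock(a);
		ret = ww_mutex_lock_slow_interruptible(b, ctx);
		if (ret)
			return ret;		/* -EINTR while sleeping on b */
		tmp = a;
		a = b;
		b = tmp;
		goto retry;
	}
	if (ret) {
		ww_mutex_unlock(a);		/* e.g. -EINTR on b: drop a and bail */
		return ret;
	}

	return 0;
}
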
@@ -256,7 +267,7 @@ extern int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
  * @lock: the mutex to be acquired
  * @ctx: w/w acquire context
  *
- * Acquires a w/w mutex with the given context after a wound case. This function
+ * Acquires a w/w mutex with the given context after a die case. This function
  * will sleep until the lock becomes available.
  *
  * The caller must have released all w/w mutexes already acquired with the
@@ -290,7 +301,7 @@ ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
  * @lock: the mutex to be acquired
  * @ctx: w/w acquire context
  *
- * Acquires a w/w mutex with the given context after a wound case. This function
+ * Acquires a w/w mutex with the given context after a die case. This function
  * will sleep until the lock becomes available and returns 0 when the lock has
  * been acquired. If a signal arrives while waiting for the lock then this
  * function returns -EINTR.
