author		Maarten Lankhorst <maarten.lankhorst@canonical.com>	2013-07-05 03:29:32 -0400
committer	Ingo Molnar <mingo@kernel.org>	2013-07-12 06:07:46 -0400
commit		1b375dc30710180c4b88cc59caba6e3481ec5c8b (patch)
tree		24b5cfc929db57207afec4633909b8e6755df5f9 /include/linux/ww_mutex.h
parent		2e17c5a97e231f3cb426f4b7895eab5be5c5442e (diff)
mutex: Move ww_mutex definitions to ww_mutex.h
Move the definitions for wound/wait mutexes out to a separate
header, ww_mutex.h. This reduces clutter in mutex.h, and
increases readability.
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Cc: Dave Airlie <airlied@gmail.com>
Link: http://lkml.kernel.org/r/51D675DC.3000907@canonical.com
[ Tidied up the code a bit. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux/ww_mutex.h')
-rw-r--r--	include/linux/ww_mutex.h	378
1 file changed, 378 insertions, 0 deletions
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
new file mode 100644
index 000000000000..760399a470bd
--- /dev/null
+++ b/include/linux/ww_mutex.h
@@ -0,0 +1,378 @@
/*
 * Wound/Wait Mutexes: blocking mutual exclusion locks with deadlock avoidance
 *
 * Original mutex implementation started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Wound/wait implementation:
 *  Copyright (C) 2013 Canonical Ltd.
 *
 * This file contains the main data structure and API definitions.
 */

#ifndef __LINUX_WW_MUTEX_H
#define __LINUX_WW_MUTEX_H

#include <linux/mutex.h>

struct ww_class {
	atomic_long_t stamp;
	struct lock_class_key acquire_key;
	struct lock_class_key mutex_key;
	const char *acquire_name;
	const char *mutex_name;
};

struct ww_acquire_ctx {
	struct task_struct *task;
	unsigned long stamp;
	unsigned acquired;
#ifdef CONFIG_DEBUG_MUTEXES
	unsigned done_acquire;
	struct ww_class *ww_class;
	struct ww_mutex *contending_lock;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned deadlock_inject_interval;
	unsigned deadlock_inject_countdown;
#endif
};

struct ww_mutex {
	struct mutex base;
	struct ww_acquire_ctx *ctx;
#ifdef CONFIG_DEBUG_MUTEXES
	struct ww_class *ww_class;
#endif
};
#ifdef CONFIG_DEBUG_MUTEXES
# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class) \
		, .ww_class = &ww_class
#else
# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class)
#endif

#define __WW_CLASS_INITIALIZER(ww_class) \
		{ .stamp = ATOMIC_LONG_INIT(0) \
		, .acquire_name = #ww_class "_acquire" \
		, .mutex_name = #ww_class "_mutex" }

#define __WW_MUTEX_INITIALIZER(lockname, class) \
		{ .base = __MUTEX_INITIALIZER(lockname.base) \
		__WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
68 | |||
69 | #define DEFINE_WW_CLASS(classname) \ | ||
70 | struct ww_class classname = __WW_CLASS_INITIALIZER(classname) | ||
71 | |||
72 | #define DEFINE_WW_MUTEX(mutexname, ww_class) \ | ||
73 | struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class) | ||
74 | |||
75 | /** | ||
76 | * ww_mutex_init - initialize the w/w mutex | ||
77 | * @lock: the mutex to be initialized | ||
78 | * @ww_class: the w/w class the mutex should belong to | ||
79 | * | ||
80 | * Initialize the w/w mutex to unlocked state and associate it with the given | ||
81 | * class. | ||
82 | * | ||
83 | * It is not allowed to initialize an already locked mutex. | ||
84 | */ | ||
85 | static inline void ww_mutex_init(struct ww_mutex *lock, | ||
86 | struct ww_class *ww_class) | ||
87 | { | ||
88 | __mutex_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key); | ||
89 | lock->ctx = NULL; | ||
90 | #ifdef CONFIG_DEBUG_MUTEXES | ||
91 | lock->ww_class = ww_class; | ||
92 | #endif | ||
93 | } | ||
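
/*
 * Illustrative sketch (editor's example, not part of this patch): the two
 * initialization paths side by side. The class, struct and function names
 * below are made up for the example.
 */
static DEFINE_WW_CLASS(example_ww_class);

struct example_obj {
	struct ww_mutex lock;
	/* ... data protected by @lock ... */
};

static inline void example_obj_init(struct example_obj *obj)
{
	/* runtime initialization binds the mutex to its w/w class */
	ww_mutex_init(&obj->lock, &example_ww_class);
}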
94 | |||
95 | /** | ||
96 | * ww_acquire_init - initialize a w/w acquire context | ||
97 | * @ctx: w/w acquire context to initialize | ||
98 | * @ww_class: w/w class of the context | ||
99 | * | ||
100 | * Initializes an context to acquire multiple mutexes of the given w/w class. | ||
101 | * | ||
102 | * Context-based w/w mutex acquiring can be done in any order whatsoever within | ||
103 | * a given lock class. Deadlocks will be detected and handled with the | ||
104 | * wait/wound logic. | ||
105 | * | ||
106 | * Mixing of context-based w/w mutex acquiring and single w/w mutex locking can | ||
107 | * result in undetected deadlocks and is so forbidden. Mixing different contexts | ||
108 | * for the same w/w class when acquiring mutexes can also result in undetected | ||
109 | * deadlocks, and is hence also forbidden. Both types of abuse will be caught by | ||
110 | * enabling CONFIG_PROVE_LOCKING. | ||
111 | * | ||
112 | * Nesting of acquire contexts for _different_ w/w classes is possible, subject | ||
113 | * to the usual locking rules between different lock classes. | ||
114 | * | ||
115 | * An acquire context must be released with ww_acquire_fini by the same task | ||
116 | * before the memory is freed. It is recommended to allocate the context itself | ||
117 | * on the stack. | ||
118 | */ | ||
static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
				   struct ww_class *ww_class)
{
	ctx->task = current;
	ctx->stamp = atomic_long_inc_return(&ww_class->stamp);
	ctx->acquired = 0;
#ifdef CONFIG_DEBUG_MUTEXES
	ctx->ww_class = ww_class;
	ctx->done_acquire = 0;
	ctx->contending_lock = NULL;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	debug_check_no_locks_freed((void *)ctx, sizeof(*ctx));
	lockdep_init_map(&ctx->dep_map, ww_class->acquire_name,
			 &ww_class->acquire_key, 0);
	mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_);
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	ctx->deadlock_inject_interval = 1;
	ctx->deadlock_inject_countdown = ctx->stamp & 0xf;
#endif
}

/**
 * ww_acquire_done - marks the end of the acquire phase
 * @ctx: the acquire context
 *
 * Marks the end of the acquire phase; any further w/w mutex lock calls using
 * this context are forbidden.
 *
 * Calling this function is optional; it is just useful to document w/w mutex
 * code and clearly separate the acquire phase from actually using the locked
 * data structures.
 */
static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	lockdep_assert_held(ctx);

	DEBUG_LOCKS_WARN_ON(ctx->done_acquire);
	ctx->done_acquire = 1;
#endif
}

/**
 * ww_acquire_fini - releases a w/w acquire context
 * @ctx: the acquire context to free
 *
 * Releases a w/w acquire context. This must be called _after_ all acquired w/w
 * mutexes have been released with ww_mutex_unlock.
 */
static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	mutex_release(&ctx->dep_map, 0, _THIS_IP_);

	DEBUG_LOCKS_WARN_ON(ctx->acquired);
	if (!config_enabled(CONFIG_PROVE_LOCKING))
		/*
		 * lockdep would normally catch this,
		 * but keep the check working without it
		 */
		ctx->done_acquire = 1;

	if (!config_enabled(CONFIG_DEBUG_LOCK_ALLOC))
		/* ensure ww_acquire_fini will still fail if called twice */
		ctx->acquired = ~0U;
#endif
}

extern int __must_check __ww_mutex_lock(struct ww_mutex *lock,
					struct ww_acquire_ctx *ctx);
extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock,
						      struct ww_acquire_ctx *ctx);

/**
 * ww_mutex_lock - acquire the w/w mutex
 * @lock: the mutex to be acquired
 * @ctx: w/w acquire context, or NULL to acquire only a single lock.
 *
 * Lock the w/w mutex exclusively for this task.
 *
 * Deadlocks within a given w/w class of locks are detected and handled with
 * the wait/wound algorithm. If the lock isn't immediately available this
 * function will either sleep until it is (wait case), or select the current
 * context for backing off by returning -EDEADLK (wound case). Trying to
 * acquire the same lock with the same context twice is also detected and
 * signalled by returning -EALREADY. Returns 0 if the mutex was successfully
 * acquired.
 *
 * In the wound case the caller must release all currently held w/w mutexes
 * for the given context and then wait for this contending lock to become
 * available by calling ww_mutex_lock_slow. Alternatively callers can opt to
 * not acquire this lock and proceed with trying to acquire further w/w
 * mutexes (e.g. when scanning through lru lists trying to free resources).
 *
 * The mutex must later on be released by the same task that
 * acquired it. The task may not exit without first unlocking the mutex. Also,
 * kernel memory where the mutex resides must not be freed with the mutex still
 * locked. The mutex must first be initialized (or statically defined) before it
 * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
 * of the same w/w lock class as was used to initialize the acquire context.
 *
 * A mutex acquired with this function must be released with ww_mutex_unlock.
 */
static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	if (ctx)
		return __ww_mutex_lock(lock, ctx);

	mutex_lock(&lock->base);
	return 0;
}
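
/*
 * Illustrative sketch (editor's example, not part of this patch): with a
 * NULL context the function above degenerates to a plain mutex_lock() and
 * cannot fail. ww_mutex_unlock() is declared further down in this file.
 */
static inline void example_single_lock(struct ww_mutex *lock)
{
	ww_mutex_lock(lock, NULL);	/* no context: behaves like mutex_lock() */
	/* ... touch the protected data ... */
	ww_mutex_unlock(lock);
}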
231 | |||
232 | /** | ||
233 | * ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible | ||
234 | * @lock: the mutex to be acquired | ||
235 | * @ctx: w/w acquire context | ||
236 | * | ||
237 | * Lock the w/w mutex exclusively for this task. | ||
238 | * | ||
239 | * Deadlocks within a given w/w class of locks are detected and handled with the | ||
240 | * wait/wound algorithm. If the lock isn't immediately avaiable this function | ||
241 | * will either sleep until it is (wait case). Or it selects the current context | ||
242 | * for backing off by returning -EDEADLK (wound case). Trying to acquire the | ||
243 | * same lock with the same context twice is also detected and signalled by | ||
244 | * returning -EALREADY. Returns 0 if the mutex was successfully acquired. If a | ||
245 | * signal arrives while waiting for the lock then this function returns -EINTR. | ||
246 | * | ||
247 | * In the wound case the caller must release all currently held w/w mutexes for | ||
248 | * the given context and then wait for this contending lock to be available by | ||
249 | * calling ww_mutex_lock_slow_interruptible. Alternatively callers can opt to | ||
250 | * not acquire this lock and proceed with trying to acquire further w/w mutexes | ||
251 | * (e.g. when scanning through lru lists trying to free resources). | ||
252 | * | ||
253 | * The mutex must later on be released by the same task that | ||
254 | * acquired it. The task may not exit without first unlocking the mutex. Also, | ||
255 | * kernel memory where the mutex resides must not be freed with the mutex still | ||
256 | * locked. The mutex must first be initialized (or statically defined) before it | ||
257 | * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be | ||
258 | * of the same w/w lock class as was used to initialize the acquire context. | ||
259 | * | ||
260 | * A mutex acquired with this function must be released with ww_mutex_unlock. | ||
261 | */ | ||
static inline int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
							   struct ww_acquire_ctx *ctx)
{
	if (ctx)
		return __ww_mutex_lock_interruptible(lock, ctx);
	else
		return mutex_lock_interruptible(&lock->base);
}
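
/*
 * Illustrative sketch (editor's example, not part of this patch): the
 * interruptible variant with a NULL context can only fail with -EINTR,
 * which the caller is expected to propagate.
 */
static inline int example_single_lock_interruptible(struct ww_mutex *lock)
{
	int err;

	err = ww_mutex_lock_interruptible(lock, NULL);
	if (err)
		return err;	/* -EINTR: a signal arrived while sleeping */

	/* ... touch the protected data ... */
	ww_mutex_unlock(lock);
	return 0;
}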
270 | |||
271 | /** | ||
272 | * ww_mutex_lock_slow - slowpath acquiring of the w/w mutex | ||
273 | * @lock: the mutex to be acquired | ||
274 | * @ctx: w/w acquire context | ||
275 | * | ||
276 | * Acquires a w/w mutex with the given context after a wound case. This function | ||
277 | * will sleep until the lock becomes available. | ||
278 | * | ||
279 | * The caller must have released all w/w mutexes already acquired with the | ||
280 | * context and then call this function on the contended lock. | ||
281 | * | ||
282 | * Afterwards the caller may continue to (re)acquire the other w/w mutexes it | ||
283 | * needs with ww_mutex_lock. Note that the -EALREADY return code from | ||
284 | * ww_mutex_lock can be used to avoid locking this contended mutex twice. | ||
285 | * | ||
286 | * It is forbidden to call this function with any other w/w mutexes associated | ||
287 | * with the context held. It is forbidden to call this on anything else than the | ||
288 | * contending mutex. | ||
289 | * | ||
290 | * Note that the slowpath lock acquiring can also be done by calling | ||
291 | * ww_mutex_lock directly. This function here is simply to help w/w mutex | ||
292 | * locking code readability by clearly denoting the slowpath. | ||
293 | */ | ||
static inline void
ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;
#ifdef CONFIG_DEBUG_MUTEXES
	DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
#endif
	ret = ww_mutex_lock(lock, ctx);
	(void)ret;
}
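
/*
 * Illustrative sketch (editor's example, not part of this patch): the
 * backoff protocol that the comments above describe, for two mutexes of
 * the same class. All names are made up; swap() is the helper from
 * <linux/kernel.h>, and ww_mutex_unlock() is declared further down.
 */
static inline int example_lock_pair(struct ww_mutex *a, struct ww_mutex *b,
				    struct ww_class *ww_class)
{
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init(&ctx, ww_class);

	err = ww_mutex_lock(a, &ctx);
	if (err)
		goto out_fini;

	while ((err = ww_mutex_lock(b, &ctx)) == -EDEADLK) {
		/*
		 * Wounded: drop the lock we hold, sleep until the
		 * contended one is free, then retry the remaining lock.
		 */
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, &ctx);
		swap(a, b);	/* "b" is now the lock still needed */
	}
	if (err) {
		ww_mutex_unlock(a);
		goto out_fini;
	}

	ww_acquire_done(&ctx);
	/* ... use the data protected by both locks ... */
	ww_mutex_unlock(a);
	ww_mutex_unlock(b);
out_fini:
	ww_acquire_fini(&ctx);
	return err;
}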
304 | |||
305 | /** | ||
306 | * ww_mutex_lock_slow_interruptible - slowpath acquiring of the w/w mutex, interruptible | ||
307 | * @lock: the mutex to be acquired | ||
308 | * @ctx: w/w acquire context | ||
309 | * | ||
310 | * Acquires a w/w mutex with the given context after a wound case. This function | ||
311 | * will sleep until the lock becomes available and returns 0 when the lock has | ||
312 | * been acquired. If a signal arrives while waiting for the lock then this | ||
313 | * function returns -EINTR. | ||
314 | * | ||
315 | * The caller must have released all w/w mutexes already acquired with the | ||
316 | * context and then call this function on the contended lock. | ||
317 | * | ||
318 | * Afterwards the caller may continue to (re)acquire the other w/w mutexes it | ||
319 | * needs with ww_mutex_lock. Note that the -EALREADY return code from | ||
320 | * ww_mutex_lock can be used to avoid locking this contended mutex twice. | ||
321 | * | ||
322 | * It is forbidden to call this function with any other w/w mutexes associated | ||
323 | * with the given context held. It is forbidden to call this on anything else | ||
324 | * than the contending mutex. | ||
325 | * | ||
326 | * Note that the slowpath lock acquiring can also be done by calling | ||
327 | * ww_mutex_lock_interruptible directly. This function here is simply to help | ||
328 | * w/w mutex locking code readability by clearly denoting the slowpath. | ||
329 | */ | ||
static inline int __must_check
ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
				 struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
#endif
	return ww_mutex_lock_interruptible(lock, ctx);
}
339 | |||
340 | extern void ww_mutex_unlock(struct ww_mutex *lock); | ||
341 | |||
342 | /** | ||
343 | * ww_mutex_trylock - tries to acquire the w/w mutex without acquire context | ||
344 | * @lock: mutex to lock | ||
345 | * | ||
346 | * Trylocks a mutex without acquire context, so no deadlock detection is | ||
347 | * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise. | ||
348 | */ | ||
349 | static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock) | ||
350 | { | ||
351 | return mutex_trylock(&lock->base); | ||
352 | } | ||
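
/*
 * Illustrative sketch (editor's example, not part of this patch): since the
 * trylock bypasses the wound/wait machinery entirely, it only suits
 * opportunistic, non-blocking peeks at the protected state.
 */
static inline void example_try_peek(struct ww_mutex *lock)
{
	if (ww_mutex_trylock(lock)) {
		/* ... quick look at the protected data ... */
		ww_mutex_unlock(lock);
	}
}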
353 | |||
354 | /*** | ||
355 | * ww_mutex_destroy - mark a w/w mutex unusable | ||
356 | * @lock: the mutex to be destroyed | ||
357 | * | ||
358 | * This function marks the mutex uninitialized, and any subsequent | ||
359 | * use of the mutex is forbidden. The mutex must not be locked when | ||
360 | * this function is called. | ||
361 | */ | ||
362 | static inline void ww_mutex_destroy(struct ww_mutex *lock) | ||
363 | { | ||
364 | mutex_destroy(&lock->base); | ||
365 | } | ||
366 | |||
367 | /** | ||
368 | * ww_mutex_is_locked - is the w/w mutex locked | ||
369 | * @lock: the mutex to be queried | ||
370 | * | ||
371 | * Returns 1 if the mutex is locked, 0 if unlocked. | ||
372 | */ | ||
373 | static inline bool ww_mutex_is_locked(struct ww_mutex *lock) | ||
374 | { | ||
375 | return mutex_is_locked(&lock->base); | ||
376 | } | ||
377 | |||
378 | #endif | ||