author     Dave Airlie <airlied@redhat.com>    2018-07-05 16:30:32 -0400
committer  Dave Airlie <airlied@redhat.com>    2018-07-05 18:47:14 -0400
commit     c5be9b54034339a7983a1167cdc80dc27fea1799 (patch)
tree       a73128c5a42a8338fa7ae76f89069aaadfe57563
parent     96b2bb0b9637df1a68bb5b6853903a207fabcefd (diff)
parent     07c13bb78c8b8a9cb6ee169659528945038d5e85 (diff)
Merge branch 'vmwgfx-next' of git://people.freedesktop.org/~thomash/linux into drm-next
A patchset worked out together with Peter Zijlstra. Ingo is OK with taking it through the DRM tree:

This is a small fallout from a work to allow batching WW mutex locks and unlocks.

Our Wound-Wait mutexes actually don't use the Wound-Wait algorithm but the Wait-Die algorithm. One could perhaps rename those mutexes tree-wide to "Wait-Die mutexes" or "Deadlock Avoidance mutexes". Another approach suggested here is to implement also the "Wound-Wait" algorithm as a per-WW-class choice, as it has advantages in some cases. See for example http://www.mathcs.emory.edu/~cheung/Courses/554/Syllabus/8-recv+serial/deadlock-compare.html

Now Wound-Wait is a preemptive algorithm, and the preemption is implemented using a lazy scheme: If a wounded transaction is about to go to sleep on a contended WW mutex, we return -EDEADLK. That is sufficient for deadlock prevention. Since with WW mutexes we also require the aborted transaction to sleep waiting to lock the WW mutex it was aborted on, this choice also provides a suitable WW mutex to sleep on. If we were to return -EDEADLK on the first WW mutex lock after the transaction was wounded, whether the WW mutex was contended or not, the transaction might frequently be restarted without a wait, which is far from optimal. Note also that with the lazy preemption scheme, contrary to Wait-Die there will be no rollbacks on lock contention of locks held by a transaction that has completed its locking sequence.

The modeset locks are then changed from Wait-Die to Wound-Wait, since the typical locking pattern of those locks very well matches the criterion for a substantial reduction in the number of rollbacks. For reservation objects, the benefit is more unclear at this point and they remain using Wait-Die.

Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180703105339.4461-1-thellstrom@vmware.com
-rw-r--r--  Documentation/locking/ww-mutex-design.txt  |  65
-rw-r--r--  drivers/dma-buf/reservation.c              |   2
-rw-r--r--  include/linux/ww_mutex.h                   |  45
-rw-r--r--  kernel/locking/locktorture.c               |   2
-rw-r--r--  kernel/locking/mutex.c                     | 345
-rw-r--r--  kernel/locking/test-ww_mutex.c             |   2
-rw-r--r--  lib/locking-selftest.c                     |   2
7 files changed, 344 insertions(+), 119 deletions(-)
diff --git a/Documentation/locking/ww-mutex-design.txt b/Documentation/locking/ww-mutex-design.txt
index 34c3a1b50b9a..f0ed7c30e695 100644
--- a/Documentation/locking/ww-mutex-design.txt
+++ b/Documentation/locking/ww-mutex-design.txt
@@ -1,4 +1,4 @@
1Wait/Wound Deadlock-Proof Mutex Design 1Wound/Wait Deadlock-Proof Mutex Design
2====================================== 2======================================
3 3
4Please read mutex-design.txt first, as it applies to wait/wound mutexes too. 4Please read mutex-design.txt first, as it applies to wait/wound mutexes too.
@@ -32,10 +32,26 @@ the oldest task) wins, and the one with the higher reservation id (i.e. the
32younger task) unlocks all of the buffers that it has already locked, and then 32younger task) unlocks all of the buffers that it has already locked, and then
33tries again. 33tries again.
34 34
35In the RDBMS literature this deadlock handling approach is called wait/wound: 35In the RDBMS literature, a reservation ticket is associated with a transaction,
36The older tasks waits until it can acquire the contended lock. The younger tasks 36and the deadlock handling approach is called Wait-Die. The name is based on
37needs to back off and drop all the locks it is currently holding, i.e. the 37the actions of a locking thread when it encounters an already locked mutex.
38younger task is wounded. 38If the transaction holding the lock is younger, the locking transaction waits.
39If the transaction holding the lock is older, the locking transaction backs off
40and dies. Hence Wait-Die.
41There is also another algorithm called Wound-Wait:
42If the transaction holding the lock is younger, the locking transaction
43wounds the transaction holding the lock, requesting it to die.
44If the transaction holding the lock is older, it waits for the other
45transaction. Hence Wound-Wait.
46The two algorithms are both fair in that a transaction will eventually succeed.
47However, the Wound-Wait algorithm is typically stated to generate fewer backoffs
48compared to Wait-Die, but is, on the other hand, associated with more work than
49Wait-Die when recovering from a backoff. Wound-Wait is also a preemptive
50algorithm in that transactions are wounded by other transactions, and that
51requires a reliable way to pick up the wounded condition and preempt the
52running transaction. Note that this is not the same as process preemption. A
53Wound-Wait transaction is considered preempted when it dies (returning
54-EDEADLK) following a wound.
39 55
40Concepts 56Concepts
41-------- 57--------
@@ -47,18 +63,20 @@ Acquire context: To ensure eventual forward progress it is important the a task
47trying to acquire locks doesn't grab a new reservation id, but keeps the one it 63trying to acquire locks doesn't grab a new reservation id, but keeps the one it
48acquired when starting the lock acquisition. This ticket is stored in the 64acquired when starting the lock acquisition. This ticket is stored in the
49acquire context. Furthermore the acquire context keeps track of debugging state 65acquire context. Furthermore the acquire context keeps track of debugging state
50to catch w/w mutex interface abuse. 66to catch w/w mutex interface abuse. An acquire context represents a
67transaction.
51 68
52W/w class: In contrast to normal mutexes the lock class needs to be explicit for 69W/w class: In contrast to normal mutexes the lock class needs to be explicit for
53w/w mutexes, since it is required to initialize the acquire context. 70w/w mutexes, since it is required to initialize the acquire context. The lock
71class also specifies what algorithm to use, Wound-Wait or Wait-Die.
54 72
55Furthermore there are three different class of w/w lock acquire functions: 73Furthermore there are three different class of w/w lock acquire functions:
56 74
57* Normal lock acquisition with a context, using ww_mutex_lock. 75* Normal lock acquisition with a context, using ww_mutex_lock.
58 76
59* Slowpath lock acquisition on the contending lock, used by the wounded task 77* Slowpath lock acquisition on the contending lock, used by the task that just
60 after having dropped all already acquired locks. These functions have the 78 killed its transaction after having dropped all already acquired locks.
61 _slow postfix. 79 These functions have the _slow postfix.
62 80
63 From a simple semantics point-of-view the _slow functions are not strictly 81 From a simple semantics point-of-view the _slow functions are not strictly
64 required, since simply calling the normal ww_mutex_lock functions on the 82 required, since simply calling the normal ww_mutex_lock functions on the
@@ -90,6 +108,12 @@ provided.
90Usage 108Usage
91----- 109-----
92 110
111The algorithm (Wait-Die vs Wound-Wait) is chosen by using either
112DEFINE_WW_CLASS() (Wound-Wait) or DEFINE_WD_CLASS() (Wait-Die).
113As a rough rule of thumb, use Wound-Wait iff you
114expect the number of simultaneous competing transactions to be typically small,
115and you want to reduce the number of rollbacks.
116
93Three different ways to acquire locks within the same w/w class. Common 117Three different ways to acquire locks within the same w/w class. Common
94definitions for methods #1 and #2: 118definitions for methods #1 and #2:
95 119
@@ -220,7 +244,7 @@ mutexes are a natural fit for such a case for two reasons:
220 244
221Note that this approach differs in two important ways from the above methods: 245Note that this approach differs in two important ways from the above methods:
222- Since the list of objects is dynamically constructed (and might very well be 246- Since the list of objects is dynamically constructed (and might very well be
223 different when retrying due to hitting the -EDEADLK wound condition) there's 247 different when retrying due to hitting the -EDEADLK die condition) there's
224 no need to keep any object on a persistent list when it's not locked. We can 248 no need to keep any object on a persistent list when it's not locked. We can
225 therefore move the list_head into the object itself. 249 therefore move the list_head into the object itself.
226- On the other hand the dynamic object list construction also means that the -EALREADY return 250- On the other hand the dynamic object list construction also means that the -EALREADY return
@@ -312,12 +336,23 @@ Design:
312 We maintain the following invariants for the wait list: 336 We maintain the following invariants for the wait list:
313 (1) Waiters with an acquire context are sorted by stamp order; waiters 337 (1) Waiters with an acquire context are sorted by stamp order; waiters
314 without an acquire context are interspersed in FIFO order. 338 without an acquire context are interspersed in FIFO order.
315 (2) Among waiters with contexts, only the first one can have other locks 339 (2) For Wait-Die, among waiters with contexts, only the first one can have
316 acquired already (ctx->acquired > 0). Note that this waiter may come 340 other locks acquired already (ctx->acquired > 0). Note that this waiter
317 after other waiters without contexts in the list. 341 may come after other waiters without contexts in the list.
342
343 The Wound-Wait preemption is implemented with a lazy-preemption scheme:
344 The wounded status of the transaction is checked only when there is
345 contention for a new lock and hence a true chance of deadlock. In that
346 situation, if the transaction is wounded, it backs off, clears the
347 wounded status and retries. A great benefit of implementing preemption in
348 this way is that the wounded transaction can identify a contending lock to
349 wait for before restarting the transaction. Just blindly restarting the
350 transaction would likely make the transaction end up in a situation where
351 it would have to back off again.
318 352
319 In general, not much contention is expected. The locks are typically used to 353 In general, not much contention is expected. The locks are typically used to
320 serialize access to resources for devices. 354 serialize access to resources for devices, and optimization focus should
355 therefore be directed towards the uncontended cases.
321 356
322Lockdep: 357Lockdep:
323 Special care has been taken to warn for as many cases of api abuse 358 Special care has been taken to warn for as many cases of api abuse
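To illustrate the per-class algorithm choice the updated documentation describes, a small sketch not taken from the patch (class and lock names are hypothetical); every ww_acquire_ctx initialized against a class inherits that class's algorithm. The reservation.c hunk that follows keeps Wait-Die for reservation objects by switching to DEFINE_WD_CLASS():

#include <linux/ww_mutex.h>

/* Hypothetical class names, chosen only to show the two definition macros. */
static DEFINE_WW_CLASS(batch_ww_class);	/* Wound-Wait: few competing transactions expected */
static DEFINE_WD_CLASS(evict_wd_class);	/* Wait-Die: keeps the pre-patch behaviour */

static struct ww_mutex batch_lock;
static struct ww_mutex evict_lock;

static void example_init(void)
{
	/* Contexts created with ww_acquire_init() against these classes
	 * automatically use the matching algorithm. */
	ww_mutex_init(&batch_lock, &batch_ww_class);
	ww_mutex_init(&evict_lock, &evict_wd_class);
}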
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 314eb1071cce..20bf90f4ee63 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -46,7 +46,7 @@
46 * write-side updates. 46 * write-side updates.
47 */ 47 */
48 48
49DEFINE_WW_CLASS(reservation_ww_class); 49DEFINE_WD_CLASS(reservation_ww_class);
50EXPORT_SYMBOL(reservation_ww_class); 50EXPORT_SYMBOL(reservation_ww_class);
51 51
52struct lock_class_key reservation_seqcount_class; 52struct lock_class_key reservation_seqcount_class;
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 39fda195bf78..3af7c0e03be5 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -6,8 +6,10 @@
6 * 6 *
7 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 7 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
8 * 8 *
9 * Wound/wait implementation: 9 * Wait/Die implementation:
10 * Copyright (C) 2013 Canonical Ltd. 10 * Copyright (C) 2013 Canonical Ltd.
11 * Choice of algorithm:
12 * Copyright (C) 2018 VMware Inc.
11 * 13 *
12 * This file contains the main data structure and API definitions. 14 * This file contains the main data structure and API definitions.
13 */ 15 */
@@ -23,14 +25,17 @@ struct ww_class {
23 struct lock_class_key mutex_key; 25 struct lock_class_key mutex_key;
24 const char *acquire_name; 26 const char *acquire_name;
25 const char *mutex_name; 27 const char *mutex_name;
28 unsigned int is_wait_die;
26}; 29};
27 30
28struct ww_acquire_ctx { 31struct ww_acquire_ctx {
29 struct task_struct *task; 32 struct task_struct *task;
30 unsigned long stamp; 33 unsigned long stamp;
31 unsigned acquired; 34 unsigned int acquired;
35 unsigned short wounded;
36 unsigned short is_wait_die;
32#ifdef CONFIG_DEBUG_MUTEXES 37#ifdef CONFIG_DEBUG_MUTEXES
33 unsigned done_acquire; 38 unsigned int done_acquire;
34 struct ww_class *ww_class; 39 struct ww_class *ww_class;
35 struct ww_mutex *contending_lock; 40 struct ww_mutex *contending_lock;
36#endif 41#endif
@@ -38,8 +43,8 @@ struct ww_acquire_ctx {
38 struct lockdep_map dep_map; 43 struct lockdep_map dep_map;
39#endif 44#endif
40#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH 45#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
41 unsigned deadlock_inject_interval; 46 unsigned int deadlock_inject_interval;
42 unsigned deadlock_inject_countdown; 47 unsigned int deadlock_inject_countdown;
43#endif 48#endif
44}; 49};
45 50
@@ -58,17 +63,21 @@ struct ww_mutex {
58# define __WW_CLASS_MUTEX_INITIALIZER(lockname, class) 63# define __WW_CLASS_MUTEX_INITIALIZER(lockname, class)
59#endif 64#endif
60 65
61#define __WW_CLASS_INITIALIZER(ww_class) \ 66#define __WW_CLASS_INITIALIZER(ww_class, _is_wait_die) \
62 { .stamp = ATOMIC_LONG_INIT(0) \ 67 { .stamp = ATOMIC_LONG_INIT(0) \
63 , .acquire_name = #ww_class "_acquire" \ 68 , .acquire_name = #ww_class "_acquire" \
64 , .mutex_name = #ww_class "_mutex" } 69 , .mutex_name = #ww_class "_mutex" \
70 , .is_wait_die = _is_wait_die }
65 71
66#define __WW_MUTEX_INITIALIZER(lockname, class) \ 72#define __WW_MUTEX_INITIALIZER(lockname, class) \
67 { .base = __MUTEX_INITIALIZER(lockname.base) \ 73 { .base = __MUTEX_INITIALIZER(lockname.base) \
68 __WW_CLASS_MUTEX_INITIALIZER(lockname, class) } 74 __WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
69 75
76#define DEFINE_WD_CLASS(classname) \
77 struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 1)
78
70#define DEFINE_WW_CLASS(classname) \ 79#define DEFINE_WW_CLASS(classname) \
71 struct ww_class classname = __WW_CLASS_INITIALIZER(classname) 80 struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 0)
72 81
73#define DEFINE_WW_MUTEX(mutexname, ww_class) \ 82#define DEFINE_WW_MUTEX(mutexname, ww_class) \
74 struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class) 83 struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class)
@@ -102,7 +111,7 @@ static inline void ww_mutex_init(struct ww_mutex *lock,
102 * 111 *
103 * Context-based w/w mutex acquiring can be done in any order whatsoever within 112 * Context-based w/w mutex acquiring can be done in any order whatsoever within
104 * a given lock class. Deadlocks will be detected and handled with the 113 * a given lock class. Deadlocks will be detected and handled with the
105 * wait/wound logic. 114 * wait/die logic.
106 * 115 *
107 * Mixing of context-based w/w mutex acquiring and single w/w mutex locking can 116 * Mixing of context-based w/w mutex acquiring and single w/w mutex locking can
108 * result in undetected deadlocks and is so forbidden. Mixing different contexts 117 * result in undetected deadlocks and is so forbidden. Mixing different contexts
@@ -123,6 +132,8 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
123 ctx->task = current; 132 ctx->task = current;
124 ctx->stamp = atomic_long_inc_return_relaxed(&ww_class->stamp); 133 ctx->stamp = atomic_long_inc_return_relaxed(&ww_class->stamp);
125 ctx->acquired = 0; 134 ctx->acquired = 0;
135 ctx->wounded = false;
136 ctx->is_wait_die = ww_class->is_wait_die;
126#ifdef CONFIG_DEBUG_MUTEXES 137#ifdef CONFIG_DEBUG_MUTEXES
127 ctx->ww_class = ww_class; 138 ctx->ww_class = ww_class;
128 ctx->done_acquire = 0; 139 ctx->done_acquire = 0;
@@ -195,13 +206,13 @@ static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
195 * Lock the w/w mutex exclusively for this task. 206 * Lock the w/w mutex exclusively for this task.
196 * 207 *
197 * Deadlocks within a given w/w class of locks are detected and handled with the 208 * Deadlocks within a given w/w class of locks are detected and handled with the
198 * wait/wound algorithm. If the lock isn't immediately avaiable this function 209 * wait/die algorithm. If the lock isn't immediately available this function
199 * will either sleep until it is (wait case). Or it selects the current context 210 * will either sleep until it is (wait case). Or it selects the current context
200 * for backing off by returning -EDEADLK (wound case). Trying to acquire the 211 * for backing off by returning -EDEADLK (die case). Trying to acquire the
201 * same lock with the same context twice is also detected and signalled by 212 * same lock with the same context twice is also detected and signalled by
202 * returning -EALREADY. Returns 0 if the mutex was successfully acquired. 213 * returning -EALREADY. Returns 0 if the mutex was successfully acquired.
203 * 214 *
204 * In the wound case the caller must release all currently held w/w mutexes for 215 * In the die case the caller must release all currently held w/w mutexes for
205 * the given context and then wait for this contending lock to be available by 216 * the given context and then wait for this contending lock to be available by
206 * calling ww_mutex_lock_slow. Alternatively callers can opt to not acquire this 217 * calling ww_mutex_lock_slow. Alternatively callers can opt to not acquire this
207 * lock and proceed with trying to acquire further w/w mutexes (e.g. when 218 * lock and proceed with trying to acquire further w/w mutexes (e.g. when
@@ -226,14 +237,14 @@ extern int /* __must_check */ ww_mutex_lock(struct ww_mutex *lock, struct ww_acq
226 * Lock the w/w mutex exclusively for this task. 237 * Lock the w/w mutex exclusively for this task.
227 * 238 *
228 * Deadlocks within a given w/w class of locks are detected and handled with the 239 * Deadlocks within a given w/w class of locks are detected and handled with the
229 * wait/wound algorithm. If the lock isn't immediately avaiable this function 240 * wait/die algorithm. If the lock isn't immediately available this function
230 * will either sleep until it is (wait case). Or it selects the current context 241 * will either sleep until it is (wait case). Or it selects the current context
231 * for backing off by returning -EDEADLK (wound case). Trying to acquire the 242 * for backing off by returning -EDEADLK (die case). Trying to acquire the
232 * same lock with the same context twice is also detected and signalled by 243 * same lock with the same context twice is also detected and signalled by
233 * returning -EALREADY. Returns 0 if the mutex was successfully acquired. If a 244 * returning -EALREADY. Returns 0 if the mutex was successfully acquired. If a
234 * signal arrives while waiting for the lock then this function returns -EINTR. 245 * signal arrives while waiting for the lock then this function returns -EINTR.
235 * 246 *
236 * In the wound case the caller must release all currently held w/w mutexes for 247 * In the die case the caller must release all currently held w/w mutexes for
237 * the given context and then wait for this contending lock to be available by 248 * the given context and then wait for this contending lock to be available by
238 * calling ww_mutex_lock_slow_interruptible. Alternatively callers can opt to 249 * calling ww_mutex_lock_slow_interruptible. Alternatively callers can opt to
239 * not acquire this lock and proceed with trying to acquire further w/w mutexes 250 * not acquire this lock and proceed with trying to acquire further w/w mutexes
@@ -256,7 +267,7 @@ extern int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
256 * @lock: the mutex to be acquired 267 * @lock: the mutex to be acquired
257 * @ctx: w/w acquire context 268 * @ctx: w/w acquire context
258 * 269 *
259 * Acquires a w/w mutex with the given context after a wound case. This function 270 * Acquires a w/w mutex with the given context after a die case. This function
260 * will sleep until the lock becomes available. 271 * will sleep until the lock becomes available.
261 * 272 *
262 * The caller must have released all w/w mutexes already acquired with the 273 * The caller must have released all w/w mutexes already acquired with the
@@ -290,7 +301,7 @@ ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
290 * @lock: the mutex to be acquired 301 * @lock: the mutex to be acquired
291 * @ctx: w/w acquire context 302 * @ctx: w/w acquire context
292 * 303 *
293 * Acquires a w/w mutex with the given context after a wound case. This function 304 * Acquires a w/w mutex with the given context after a die case. This function
294 * will sleep until the lock becomes available and returns 0 when the lock has 305 * will sleep until the lock becomes available and returns 0 when the lock has
295 * been acquired. If a signal arrives while waiting for the lock then this 306 * been acquired. If a signal arrives while waiting for the lock then this
296 * function returns -EINTR. 307 * function returns -EINTR.
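A hedged sketch of the interruptible variants documented above, showing how the -EINTR and -EDEADLK (die) cases documented in these kernel-doc comments can be handled together; the helper name and structure are illustrative, not from the patch. On a non-zero return the caller holds no locks and may retry (after -EDEADLK) or abort (after -EINTR):

static int lock_two_interruptible(struct ww_mutex *a, struct ww_mutex *b,
				  struct ww_acquire_ctx *ctx)
{
	int ret;

	ret = ww_mutex_lock_interruptible(a, ctx);
	if (ret)
		return ret;			/* -EINTR: signal while sleeping */

	ret = ww_mutex_lock_interruptible(b, ctx);
	if (ret == -EDEADLK) {
		/* Die case: drop everything, then sleep on the contended lock. */
		ww_mutex_unlock(a);
		ret = ww_mutex_lock_slow_interruptible(b, ctx);
		if (ret)
			return ret;		/* -EINTR */
		ret = ww_mutex_lock_interruptible(a, ctx);
		if (ret) {			/* -EINTR, or -EDEADLK again */
			ww_mutex_unlock(b);
			return ret;
		}
	} else if (ret) {
		ww_mutex_unlock(a);
		return ret;			/* -EINTR */
	}

	return 0;				/* both locks held */
}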
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 8402b3349dca..c28224347d69 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -365,7 +365,7 @@ static struct lock_torture_ops mutex_lock_ops = {
365}; 365};
366 366
367#include <linux/ww_mutex.h> 367#include <linux/ww_mutex.h>
368static DEFINE_WW_CLASS(torture_ww_class); 368static DEFINE_WD_CLASS(torture_ww_class);
369static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class); 369static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class);
370static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class); 370static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class);
371static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class); 371static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class);
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index f44f658ae629..1a81a1257b3f 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -174,6 +174,21 @@ static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_wait
174} 174}
175 175
176/* 176/*
177 * Add @waiter to a given location in the lock wait_list and set the
178 * FLAG_WAITERS flag if it's the first waiter.
179 */
180static void __sched
181__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
182 struct list_head *list)
183{
184 debug_mutex_add_waiter(lock, waiter, current);
185
186 list_add_tail(&waiter->list, list);
187 if (__mutex_waiter_is_first(lock, waiter))
188 __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
189}
190
191/*
177 * Give up ownership to a specific task, when @task = NULL, this is equivalent 192 * Give up ownership to a specific task, when @task = NULL, this is equivalent
178 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOF, preserves 193 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOF, preserves
179 * WAITERS. Provides RELEASE semantics like a regular unlock, the 194 * WAITERS. Provides RELEASE semantics like a regular unlock, the
@@ -244,6 +259,22 @@ void __sched mutex_lock(struct mutex *lock)
244EXPORT_SYMBOL(mutex_lock); 259EXPORT_SYMBOL(mutex_lock);
245#endif 260#endif
246 261
262/*
263 * Wait-Die:
264 * The newer transactions are killed when:
265 * It (the new transaction) makes a request for a lock being held
266 * by an older transaction.
267 *
268 * Wound-Wait:
269 * The newer transactions are wounded when:
270 * An older transaction makes a request for a lock being held by
271 * the newer transaction.
272 */
273
274/*
275 * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
276 * it.
277 */
247static __always_inline void 278static __always_inline void
248ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx) 279ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
249{ 280{
@@ -282,26 +313,108 @@ ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
282 DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class); 313 DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
283#endif 314#endif
284 ww_ctx->acquired++; 315 ww_ctx->acquired++;
316 ww->ctx = ww_ctx;
285} 317}
286 318
319/*
320 * Determine if context @a is 'after' context @b. IOW, @a is a younger
321 * transaction than @b and depending on algorithm either needs to wait for
322 * @b or die.
323 */
287static inline bool __sched 324static inline bool __sched
288__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b) 325__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
289{ 326{
290 return a->stamp - b->stamp <= LONG_MAX && 327
291 (a->stamp != b->stamp || a > b); 328 return (signed long)(a->stamp - b->stamp) > 0;
329}
330
331/*
332 * Wait-Die; wake a younger waiter context (when locks held) such that it can
333 * die.
334 *
335 * Among waiters with context, only the first one can have other locks acquired
336 * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
337 * __ww_mutex_check_kill() wake any but the earliest context.
338 */
339static bool __sched
340__ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
341 struct ww_acquire_ctx *ww_ctx)
342{
343 if (!ww_ctx->is_wait_die)
344 return false;
345
346 if (waiter->ww_ctx->acquired > 0 &&
347 __ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
348 debug_mutex_wake_waiter(lock, waiter);
349 wake_up_process(waiter->task);
350 }
351
352 return true;
353}
354
355/*
356 * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
357 *
358 * Wound the lock holder if there are waiters with older transactions than
359 * the lock holder. Even if multiple waiters may wound the lock holder,
360 * it's sufficient that only one does.
361 */
362static bool __ww_mutex_wound(struct mutex *lock,
363 struct ww_acquire_ctx *ww_ctx,
364 struct ww_acquire_ctx *hold_ctx)
365{
366 struct task_struct *owner = __mutex_owner(lock);
367
368 lockdep_assert_held(&lock->wait_lock);
369
370 /*
371 * Possible through __ww_mutex_add_waiter() when we race with
372 * ww_mutex_set_context_fastpath(). In that case we'll get here again
373 * through __ww_mutex_check_waiters().
374 */
375 if (!hold_ctx)
376 return false;
377
378 /*
379 * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
380 * it cannot go away because we'll have FLAG_WAITERS set and hold
381 * wait_lock.
382 */
383 if (!owner)
384 return false;
385
386 if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
387 hold_ctx->wounded = 1;
388
389 /*
390 * wake_up_process() paired with set_current_state()
391 * inserts sufficient barriers to make sure @owner either sees
392 * it's wounded in __ww_mutex_lock_check_stamp() or has a
393 * wakeup pending to re-read the wounded state.
394 */
395 if (owner != current)
396 wake_up_process(owner);
397
398 return true;
399 }
400
401 return false;
292} 402}
293 403
294/* 404/*
295 * Wake up any waiters that may have to back off when the lock is held by the 405 * We just acquired @lock under @ww_ctx, if there are later contexts waiting
296 * given context. 406 * behind us on the wait-list, check if they need to die, or wound us.
297 * 407 *
298 * Due to the invariants on the wait list, this can only affect the first 408 * See __ww_mutex_add_waiter() for the list-order construction; basically the
299 * waiter with a context. 409 * list is ordered by stamp, smallest (oldest) first.
410 *
411 * This relies on never mixing wait-die/wound-wait on the same wait-list;
412 * which is currently ensured by that being a ww_class property.
300 * 413 *
301 * The current task must not be on the wait list. 414 * The current task must not be on the wait list.
302 */ 415 */
303static void __sched 416static void __sched
304__ww_mutex_wakeup_for_backoff(struct mutex *lock, struct ww_acquire_ctx *ww_ctx) 417__ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
305{ 418{
306 struct mutex_waiter *cur; 419 struct mutex_waiter *cur;
307 420
@@ -311,66 +424,51 @@ __ww_mutex_wakeup_for_backoff(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
311 if (!cur->ww_ctx) 424 if (!cur->ww_ctx)
312 continue; 425 continue;
313 426
314 if (cur->ww_ctx->acquired > 0 && 427 if (__ww_mutex_die(lock, cur, ww_ctx) ||
315 __ww_ctx_stamp_after(cur->ww_ctx, ww_ctx)) { 428 __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
316 debug_mutex_wake_waiter(lock, cur); 429 break;
317 wake_up_process(cur->task);
318 }
319
320 break;
321 } 430 }
322} 431}
323 432
324/* 433/*
325 * After acquiring lock with fastpath or when we lost out in contested 434 * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
326 * slowpath, set ctx and wake up any waiters so they can recheck. 435 * and wake up any waiters so they can recheck.
327 */ 436 */
328static __always_inline void 437static __always_inline void
329ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) 438ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
330{ 439{
331 ww_mutex_lock_acquired(lock, ctx); 440 ww_mutex_lock_acquired(lock, ctx);
332 441
333 lock->ctx = ctx;
334
335 /* 442 /*
336 * The lock->ctx update should be visible on all cores before 443 * The lock->ctx update should be visible on all cores before
337 * the atomic read is done, otherwise contended waiters might be 444 * the WAITERS check is done, otherwise contended waiters might be
338 * missed. The contended waiters will either see ww_ctx == NULL 445 * missed. The contended waiters will either see ww_ctx == NULL
339 * and keep spinning, or it will acquire wait_lock, add itself 446 * and keep spinning, or it will acquire wait_lock, add itself
340 * to waiter list and sleep. 447 * to waiter list and sleep.
341 */ 448 */
342 smp_mb(); /* ^^^ */ 449 smp_mb(); /* See comments above and below. */
343 450
344 /* 451 /*
345 * Check if lock is contended, if not there is nobody to wake up 452 * [W] ww->ctx = ctx [W] MUTEX_FLAG_WAITERS
453 * MB MB
454 * [R] MUTEX_FLAG_WAITERS [R] ww->ctx
455 *
456 * The memory barrier above pairs with the memory barrier in
457 * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
458 * and/or !empty list.
346 */ 459 */
347 if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS))) 460 if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
348 return; 461 return;
349 462
350 /* 463 /*
351 * Uh oh, we raced in fastpath, wake up everyone in this case, 464 * Uh oh, we raced in fastpath, check if any of the waiters need to
352 * so they can see the new lock->ctx. 465 * die or wound us.
353 */ 466 */
354 spin_lock(&lock->base.wait_lock); 467 spin_lock(&lock->base.wait_lock);
355 __ww_mutex_wakeup_for_backoff(&lock->base, ctx); 468 __ww_mutex_check_waiters(&lock->base, ctx);
356 spin_unlock(&lock->base.wait_lock); 469 spin_unlock(&lock->base.wait_lock);
357} 470}
358 471
359/*
360 * After acquiring lock in the slowpath set ctx.
361 *
362 * Unlike for the fast path, the caller ensures that waiters are woken up where
363 * necessary.
364 *
365 * Callers must hold the mutex wait_lock.
366 */
367static __always_inline void
368ww_mutex_set_context_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
369{
370 ww_mutex_lock_acquired(lock, ctx);
371 lock->ctx = ctx;
372}
373
374#ifdef CONFIG_MUTEX_SPIN_ON_OWNER 472#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
375 473
376static inline 474static inline
@@ -646,37 +744,83 @@ void __sched ww_mutex_unlock(struct ww_mutex *lock)
646} 744}
647EXPORT_SYMBOL(ww_mutex_unlock); 745EXPORT_SYMBOL(ww_mutex_unlock);
648 746
747
748static __always_inline int __sched
749__ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
750{
751 if (ww_ctx->acquired > 0) {
752#ifdef CONFIG_DEBUG_MUTEXES
753 struct ww_mutex *ww;
754
755 ww = container_of(lock, struct ww_mutex, base);
756 DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
757 ww_ctx->contending_lock = ww;
758#endif
759 return -EDEADLK;
760 }
761
762 return 0;
763}
764
765
766/*
767 * Check the wound condition for the current lock acquire.
768 *
769 * Wound-Wait: If we're wounded, kill ourself.
770 *
771 * Wait-Die: If we're trying to acquire a lock already held by an older
772 * context, kill ourselves.
773 *
774 * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
775 * look at waiters before us in the wait-list.
776 */
649static inline int __sched 777static inline int __sched
650__ww_mutex_lock_check_stamp(struct mutex *lock, struct mutex_waiter *waiter, 778__ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
651 struct ww_acquire_ctx *ctx) 779 struct ww_acquire_ctx *ctx)
652{ 780{
653 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); 781 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
654 struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx); 782 struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
655 struct mutex_waiter *cur; 783 struct mutex_waiter *cur;
656 784
785 if (ctx->acquired == 0)
786 return 0;
787
788 if (!ctx->is_wait_die) {
789 if (ctx->wounded)
790 return __ww_mutex_kill(lock, ctx);
791
792 return 0;
793 }
794
657 if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx)) 795 if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
658 goto deadlock; 796 return __ww_mutex_kill(lock, ctx);
659 797
660 /* 798 /*
661 * If there is a waiter in front of us that has a context, then its 799 * If there is a waiter in front of us that has a context, then its
662 * stamp is earlier than ours and we must back off. 800 * stamp is earlier than ours and we must kill ourself.
663 */ 801 */
664 cur = waiter; 802 cur = waiter;
665 list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) { 803 list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
666 if (cur->ww_ctx) 804 if (!cur->ww_ctx)
667 goto deadlock; 805 continue;
806
807 return __ww_mutex_kill(lock, ctx);
668 } 808 }
669 809
670 return 0; 810 return 0;
671
672deadlock:
673#ifdef CONFIG_DEBUG_MUTEXES
674 DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
675 ctx->contending_lock = ww;
676#endif
677 return -EDEADLK;
678} 811}
679 812
813/*
814 * Add @waiter to the wait-list, keep the wait-list ordered by stamp, smallest
815 * first. Such that older contexts are preferred to acquire the lock over
816 * younger contexts.
817 *
818 * Waiters without context are interspersed in FIFO order.
819 *
820 * Furthermore, for Wait-Die kill ourself immediately when possible (there are
821 * older contexts already waiting) to avoid unnecessary waiting and for
822 * Wound-Wait ensure we wound the owning context when it is younger.
823 */
680static inline int __sched 824static inline int __sched
681__ww_mutex_add_waiter(struct mutex_waiter *waiter, 825__ww_mutex_add_waiter(struct mutex_waiter *waiter,
682 struct mutex *lock, 826 struct mutex *lock,
@@ -684,16 +828,21 @@ __ww_mutex_add_waiter(struct mutex_waiter *waiter,
684{ 828{
685 struct mutex_waiter *cur; 829 struct mutex_waiter *cur;
686 struct list_head *pos; 830 struct list_head *pos;
831 bool is_wait_die;
687 832
688 if (!ww_ctx) { 833 if (!ww_ctx) {
689 list_add_tail(&waiter->list, &lock->wait_list); 834 __mutex_add_waiter(lock, waiter, &lock->wait_list);
690 return 0; 835 return 0;
691 } 836 }
692 837
838 is_wait_die = ww_ctx->is_wait_die;
839
693 /* 840 /*
694 * Add the waiter before the first waiter with a higher stamp. 841 * Add the waiter before the first waiter with a higher stamp.
695 * Waiters without a context are skipped to avoid starving 842 * Waiters without a context are skipped to avoid starving
696 * them. 843 * them. Wait-Die waiters may die here. Wound-Wait waiters
844 * never die here, but they are sorted in stamp order and
845 * may wound the lock holder.
697 */ 846 */
698 pos = &lock->wait_list; 847 pos = &lock->wait_list;
699 list_for_each_entry_reverse(cur, &lock->wait_list, list) { 848 list_for_each_entry_reverse(cur, &lock->wait_list, list) {
@@ -701,16 +850,16 @@ __ww_mutex_add_waiter(struct mutex_waiter *waiter,
701 continue; 850 continue;
702 851
703 if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) { 852 if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
704 /* Back off immediately if necessary. */ 853 /*
705 if (ww_ctx->acquired > 0) { 854 * Wait-Die: if we find an older context waiting, there
706#ifdef CONFIG_DEBUG_MUTEXES 855 * is no point in queueing behind it, as we'd have to
707 struct ww_mutex *ww; 856 * die the moment it would acquire the lock.
708 857 */
709 ww = container_of(lock, struct ww_mutex, base); 858 if (is_wait_die) {
710 DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock); 859 int ret = __ww_mutex_kill(lock, ww_ctx);
711 ww_ctx->contending_lock = ww; 860
712#endif 861 if (ret)
713 return -EDEADLK; 862 return ret;
714 } 863 }
715 864
716 break; 865 break;
@@ -718,17 +867,28 @@ __ww_mutex_add_waiter(struct mutex_waiter *waiter,
718 867
719 pos = &cur->list; 868 pos = &cur->list;
720 869
870 /* Wait-Die: ensure younger waiters die. */
871 __ww_mutex_die(lock, cur, ww_ctx);
872 }
873
874 __mutex_add_waiter(lock, waiter, pos);
875
876 /*
877 * Wound-Wait: if we're blocking on a mutex owned by a younger context,
878 * wound that such that we might proceed.
879 */
880 if (!is_wait_die) {
881 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
882
721 /* 883 /*
722 * Wake up the waiter so that it gets a chance to back 884 * See ww_mutex_set_context_fastpath(). Orders setting
723 * off. 885 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
886 * such that either we or the fastpath will wound @ww->ctx.
724 */ 887 */
725 if (cur->ww_ctx->acquired > 0) { 888 smp_mb();
726 debug_mutex_wake_waiter(lock, cur); 889 __ww_mutex_wound(lock, ww_ctx, ww->ctx);
727 wake_up_process(cur->task);
728 }
729 } 890 }
730 891
731 list_add_tail(&waiter->list, pos);
732 return 0; 892 return 0;
733} 893}
734 894
@@ -751,6 +911,14 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
751 if (use_ww_ctx && ww_ctx) { 911 if (use_ww_ctx && ww_ctx) {
752 if (unlikely(ww_ctx == READ_ONCE(ww->ctx))) 912 if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
753 return -EALREADY; 913 return -EALREADY;
914
915 /*
916 * Reset the wounded flag after a kill. No other process can
917 * race and wound us here since they can't have a valid owner
918 * pointer if we don't have any locks held.
919 */
920 if (ww_ctx->acquired == 0)
921 ww_ctx->wounded = 0;
754 } 922 }
755 923
756 preempt_disable(); 924 preempt_disable();
@@ -772,7 +940,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
772 */ 940 */
773 if (__mutex_trylock(lock)) { 941 if (__mutex_trylock(lock)) {
774 if (use_ww_ctx && ww_ctx) 942 if (use_ww_ctx && ww_ctx)
775 __ww_mutex_wakeup_for_backoff(lock, ww_ctx); 943 __ww_mutex_check_waiters(lock, ww_ctx);
776 944
777 goto skip_wait; 945 goto skip_wait;
778 } 946 }
@@ -784,25 +952,26 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
784 952
785 if (!use_ww_ctx) { 953 if (!use_ww_ctx) {
786 /* add waiting tasks to the end of the waitqueue (FIFO): */ 954 /* add waiting tasks to the end of the waitqueue (FIFO): */
787 list_add_tail(&waiter.list, &lock->wait_list); 955 __mutex_add_waiter(lock, &waiter, &lock->wait_list);
956
788 957
789#ifdef CONFIG_DEBUG_MUTEXES 958#ifdef CONFIG_DEBUG_MUTEXES
790 waiter.ww_ctx = MUTEX_POISON_WW_CTX; 959 waiter.ww_ctx = MUTEX_POISON_WW_CTX;
791#endif 960#endif
792 } else { 961 } else {
793 /* Add in stamp order, waking up waiters that must back off. */ 962 /*
963 * Add in stamp order, waking up waiters that must kill
964 * themselves.
965 */
794 ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx); 966 ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
795 if (ret) 967 if (ret)
796 goto err_early_backoff; 968 goto err_early_kill;
797 969
798 waiter.ww_ctx = ww_ctx; 970 waiter.ww_ctx = ww_ctx;
799 } 971 }
800 972
801 waiter.task = current; 973 waiter.task = current;
802 974
803 if (__mutex_waiter_is_first(lock, &waiter))
804 __mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
805
806 set_current_state(state); 975 set_current_state(state);
807 for (;;) { 976 for (;;) {
808 /* 977 /*
@@ -815,7 +984,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
815 goto acquired; 984 goto acquired;
816 985
817 /* 986 /*
818 * Check for signals and wound conditions while holding 987 * Check for signals and kill conditions while holding
819 * wait_lock. This ensures the lock cancellation is ordered 988 * wait_lock. This ensures the lock cancellation is ordered
820 * against mutex_unlock() and wake-ups do not go missing. 989 * against mutex_unlock() and wake-ups do not go missing.
821 */ 990 */
@@ -824,8 +993,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
824 goto err; 993 goto err;
825 } 994 }
826 995
827 if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) { 996 if (use_ww_ctx && ww_ctx) {
828 ret = __ww_mutex_lock_check_stamp(lock, &waiter, ww_ctx); 997 ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
829 if (ret) 998 if (ret)
830 goto err; 999 goto err;
831 } 1000 }
@@ -859,6 +1028,16 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
859acquired: 1028acquired:
860 __set_current_state(TASK_RUNNING); 1029 __set_current_state(TASK_RUNNING);
861 1030
1031 if (use_ww_ctx && ww_ctx) {
1032 /*
1033 * Wound-Wait; we stole the lock (!first_waiter), check the
1034 * waiters as anyone might want to wound us.
1035 */
1036 if (!ww_ctx->is_wait_die &&
1037 !__mutex_waiter_is_first(lock, &waiter))
1038 __ww_mutex_check_waiters(lock, ww_ctx);
1039 }
1040
862 mutex_remove_waiter(lock, &waiter, current); 1041 mutex_remove_waiter(lock, &waiter, current);
863 if (likely(list_empty(&lock->wait_list))) 1042 if (likely(list_empty(&lock->wait_list)))
864 __mutex_clear_flag(lock, MUTEX_FLAGS); 1043 __mutex_clear_flag(lock, MUTEX_FLAGS);
@@ -870,7 +1049,7 @@ skip_wait:
870 lock_acquired(&lock->dep_map, ip); 1049 lock_acquired(&lock->dep_map, ip);
871 1050
872 if (use_ww_ctx && ww_ctx) 1051 if (use_ww_ctx && ww_ctx)
873 ww_mutex_set_context_slowpath(ww, ww_ctx); 1052 ww_mutex_lock_acquired(ww, ww_ctx);
874 1053
875 spin_unlock(&lock->wait_lock); 1054 spin_unlock(&lock->wait_lock);
876 preempt_enable(); 1055 preempt_enable();
@@ -879,7 +1058,7 @@ skip_wait:
879err: 1058err:
880 __set_current_state(TASK_RUNNING); 1059 __set_current_state(TASK_RUNNING);
881 mutex_remove_waiter(lock, &waiter, current); 1060 mutex_remove_waiter(lock, &waiter, current);
882err_early_backoff: 1061err_early_kill:
883 spin_unlock(&lock->wait_lock); 1062 spin_unlock(&lock->wait_lock);
884 debug_mutex_free_waiter(&waiter); 1063 debug_mutex_free_waiter(&waiter);
885 mutex_release(&lock->dep_map, 1, ip); 1064 mutex_release(&lock->dep_map, 1, ip);
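For readers puzzling over the new stamp comparison in __ww_ctx_stamp_after() above, here is a standalone userspace C illustration (not kernel code) of why the signed subtraction stays correct across counter wrap-around, assuming the two stamps differ by less than LONG_MAX:

#include <limits.h>
#include <stdio.h>

/* Same comparison as __ww_ctx_stamp_after(): "a is younger than b". */
static int stamp_after(unsigned long a, unsigned long b)
{
	return (long)(a - b) > 0;
}

int main(void)
{
	unsigned long old_stamp = ULONG_MAX - 1;	/* issued just before wrap */
	unsigned long young_stamp = 2;			/* issued just after wrap */

	/* Prints "1 0": the post-wrap stamp is still recognised as younger. */
	printf("%d %d\n", stamp_after(young_stamp, old_stamp),
	       stamp_after(old_stamp, young_stamp));
	return 0;
}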
diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
index 0e4cd64ad2c0..5b915b370d5a 100644
--- a/kernel/locking/test-ww_mutex.c
+++ b/kernel/locking/test-ww_mutex.c
@@ -26,7 +26,7 @@
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/ww_mutex.h> 27#include <linux/ww_mutex.h>
28 28
29static DEFINE_WW_CLASS(ww_class); 29static DEFINE_WD_CLASS(ww_class);
30struct workqueue_struct *wq; 30struct workqueue_struct *wq;
31 31
32struct test_mutex { 32struct test_mutex {
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index b5c1293ce147..1e1bbf171eca 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -29,7 +29,7 @@
29 */ 29 */
30static unsigned int debug_locks_verbose; 30static unsigned int debug_locks_verbose;
31 31
32static DEFINE_WW_CLASS(ww_lockdep); 32static DEFINE_WD_CLASS(ww_lockdep);
33 33
34static int __init setup_debug_locks_verbose(char *str) 34static int __init setup_debug_locks_verbose(char *str)
35{ 35{