 -rw-r--r--  Documentation/ww-mutex-design.txt | 344
 -rw-r--r--  include/linux/mutex-debug.h       |   1
 -rw-r--r--  include/linux/mutex.h             | 355
 -rw-r--r--  kernel/mutex.c                    | 318
 -rw-r--r--  lib/debug_locks.c                 |   2
 5 files changed, 1003 insertions, 17 deletions
diff --git a/Documentation/ww-mutex-design.txt b/Documentation/ww-mutex-design.txt
new file mode 100644
index 000000000000..8a112dc304c3
--- /dev/null
+++ b/Documentation/ww-mutex-design.txt
@@ -0,0 +1,344 @@
1Wait/Wound Deadlock-Proof Mutex Design
2======================================
3
4Please read mutex-design.txt first, as it applies to wait/wound mutexes too.
5
6Motivation for WW-Mutexes
7-------------------------
8
9GPUs do operations that commonly involve many buffers. Those buffers
10can be shared across contexts/processes, exist in different memory
11domains (for example VRAM vs system memory), and so on. And with
12PRIME / dmabuf, they can even be shared across devices. So there are
13a handful of situations where the driver needs to wait for buffers to
14become ready. If you think about this in terms of waiting on a buffer
15mutex for it to become available, this presents a problem because
16there is no way to guarantee that buffers appear in an execbuf/batch in
17the same order in all contexts. That is directly under control of
18userspace, and a result of the sequence of GL calls that an application
19makes, which results in the potential for deadlock. The problem gets
20more complex when you consider that the kernel may need to migrate the
21buffer(s) into VRAM before the GPU operates on the buffer(s), which
22may in turn require evicting some other buffers (and you don't want to
23evict other buffers which are already queued up to the GPU), but for a
24simplified understanding of the problem you can ignore this.
25
26The algorithm that the TTM graphics subsystem came up with for dealing with
27this problem is quite simple. For each group of buffers (execbuf) that need
28to be locked, the caller would be assigned a unique reservation id/ticket,
29from a global counter. In case of deadlock while locking all the buffers
30associated with an execbuf, the one with the lowest reservation ticket (i.e.
31the oldest task) wins, and the one with the higher reservation id (i.e. the
32younger task) unlocks all of the buffers that it has already locked, and then
33tries again.
34
35In the RDBMS literature this deadlock handling approach is called wait/wound:
36The older task waits until it can acquire the contended lock. The younger task
37needs to back off and drop all the locks it is currently holding, i.e. the
38younger task is wounded.
39
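In code, the wait-vs-wound decision boils down to comparing the tickets (stamps)
of the two acquire contexts involved. A minimal sketch of that check, modelled
on __mutex_lock_check_stamp() from the kernel/mutex.c part of this patch (the
helper name here is made up for illustration):

static int ww_should_back_off(struct ww_acquire_ctx *ctx,
			      struct ww_acquire_ctx *hold_ctx)
{
	/* The lock is currently unowned: nothing to decide, just take it. */
	if (!hold_ctx)
		return 0;

	/* Same context: we already hold this lock. */
	if (ctx == hold_ctx)
		return -EALREADY;

	/*
	 * Ticket comparison with wraparound handling: if our ticket is
	 * younger (higher) than the holder's, we are wounded and must back
	 * off with -EDEADLK; otherwise we are the older task and just wait.
	 */
	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx))
		return -EDEADLK;

	return 0;
}
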
40Concepts
41--------
42
43Compared to normal mutexes, two additional concepts/objects show up in the lock
44interface for w/w mutexes:
45
46Acquire context: To ensure eventual forward progress it is important that a task
47trying to acquire locks doesn't grab a new reservation id, but keeps the one it
48acquired when starting the lock acquisition. This ticket is stored in the
49acquire context. Furthermore the acquire context keeps track of debugging state
50to catch w/w mutex interface abuse.
51
52W/w class: In contrast to normal mutexes the lock class needs to be explicit for
53w/w mutexes, since it is required to initialize the acquire context.
54
55Furthermore there are three different classes of w/w lock acquire functions:
56
57* Normal lock acquisition with a context, using ww_mutex_lock.
58
59* Slowpath lock acquisition on the contending lock, used by the wounded task
60 after having dropped all already acquired locks. These functions have the
61 _slow postfix.
62
63 From a simple semantics point-of-view the _slow functions are not strictly
64 required, since simply calling the normal ww_mutex_lock functions on the
65 contending lock (after having dropped all other already acquired locks) will
66 work correctly. After all if no other ww mutex has been acquired yet there's
67 no deadlock potential and hence the ww_mutex_lock call will block and not
68 prematurely return -EDEADLK. The advantage of the _slow functions is in
69 interface safety:
70 - ww_mutex_lock has a __must_check int return type, whereas ww_mutex_lock_slow
71 has a void return type. Note that since ww mutex code needs loops/retries
72 anyway the __must_check doesn't result in spurious warnings, even though the
73 very first lock operation can never fail.
74 - When full debugging is enabled ww_mutex_lock_slow checks that all acquired
75 ww mutexes have been released (preventing deadlocks) and makes sure that we
76 block on the contending lock (preventing spinning through the -EDEADLK
77 slowpath until the contended lock can be acquired).
78
79* Functions to only acquire a single w/w mutex, which results in the exact same
80 semantics as a normal mutex. This is done by calling ww_mutex_lock with a NULL
81 context.
82
83 Again this is not strictly required. But often you only want to acquire a
84 single lock in which case it's pointless to set up an acquire context (and so
85 better to avoid grabbing a deadlock avoidance ticket).
86
87Of course, all the usual variants for handling wake-ups due to signals are also
88provided.
89
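For reference, this is the interface the rest of this document uses, as added
by this patch in include/linux/mutex.h (static inline and __must_check
annotations are elided here; see the kerneldoc further down for the details):

	DEFINE_WW_CLASS(classname);
	void ww_mutex_init(struct ww_mutex *lock, struct ww_class *ww_class);

	void ww_acquire_init(struct ww_acquire_ctx *ctx, struct ww_class *ww_class);
	void ww_acquire_done(struct ww_acquire_ctx *ctx);
	void ww_acquire_fini(struct ww_acquire_ctx *ctx);

	int  ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx);
	int  ww_mutex_lock_interruptible(struct ww_mutex *lock,
					 struct ww_acquire_ctx *ctx);
	void ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx);
	int  ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
					      struct ww_acquire_ctx *ctx);
	int  ww_mutex_trylock(struct ww_mutex *lock);
	void ww_mutex_unlock(struct ww_mutex *lock);
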
90Usage
91-----
92
93There are three different ways to acquire locks within the same w/w class.
94Common definitions for methods #1 and #2:
95
96static DEFINE_WW_CLASS(ww_class);
97
98struct obj {
99 struct ww_mutex lock;
100 /* obj data */
101};
102
103struct obj_entry {
104 struct list_head head;
105 struct obj *obj;
106};
107
108Method 1, using a list in execbuf->buffers that's not allowed to be reordered.
109This is useful if a list of required objects is already tracked somewhere.
110Furthermore the lock helper can propagate the -EALREADY return code back to
111the caller as a signal that an object is on the list twice. This is useful if
112the list is constructed from userspace input and the ABI requires userspace to
113not have duplicate entries (e.g. for a gpu commandbuffer submission ioctl).
114
115int lock_objs(struct list_head *list, struct ww_acquire_ctx *ctx)
116{
117 struct obj *res_obj = NULL;
118 struct obj_entry *contended_entry = NULL;
119 struct obj_entry *entry;
	int ret;
120
121 ww_acquire_init(ctx, &ww_class);
122
123retry:
124 list_for_each_entry (entry, list, head) {
125 if (entry->obj == res_obj) {
126 res_obj = NULL;
127 continue;
128 }
129 ret = ww_mutex_lock(&entry->obj->lock, ctx);
130 if (ret < 0) {
131 contended_entry = entry;
132 goto err;
133 }
134 }
135
136 ww_acquire_done(ctx);
137 return 0;
138
139err:
140 list_for_each_entry_continue_reverse (entry, list, head)
141 ww_mutex_unlock(&entry->obj->lock);
142
143 if (res_obj)
144 ww_mutex_unlock(&res_obj->lock);
145
146 if (ret == -EDEADLK) {
147 /* we lost out in a seqno race, lock and retry.. */
148 ww_mutex_lock_slow(&contended_entry->obj->lock, ctx);
149 res_obj = contended_entry->obj;
150 goto retry;
151 }
152 ww_acquire_fini(ctx);
153
154 return ret;
155}
156
157Method 2, using a list in execbuf->buffers that can be reordered. The same
158duplicate-entry detection semantics using -EALREADY as in method 1 apply, but
159the list reordering allows for a bit more idiomatic code.
160
161int lock_objs(struct list_head *list, struct ww_acquire_ctx *ctx)
162{
163 struct obj_entry *entry, *entry2;
	int ret;
164
165 ww_acquire_init(ctx, &ww_class);
166
167 list_for_each_entry (entry, list, head) {
168 ret = ww_mutex_lock(&entry->obj->lock, ctx);
169 if (ret < 0) {
170 entry2 = entry;
171
172 list_for_each_entry_continue_reverse (entry2, list, head)
173 ww_mutex_unlock(&entry2->obj->lock);
174
175 if (ret != -EDEADLK) {
176 ww_acquire_fini(ctx);
177 return ret;
178 }
179
180 /* we lost out in a seqno race, lock and retry.. */
181 ww_mutex_lock_slow(&entry->obj->lock, ctx);
182
183 /*
184			 * Move entry to the head of the list, this will point
185			 * entry->next to the first unlocked entry,
186 * restarting the for loop.
187 */
188 list_del(&entry->head);
189 list_add(&entry->head, list);
190 }
191 }
192
193 ww_acquire_done(ctx);
194 return 0;
195}
196
197Unlocking works the same way for both methods #1 and #2:
198
199void unlock_objs(struct list_head *list, struct ww_acquire_ctx *ctx)
200{
201 struct obj_entry *entry;
202
203 list_for_each_entry (entry, list, head)
204 ww_mutex_unlock(&entry->obj->lock);
205
206 ww_acquire_fini(ctx);
207}
208
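A caller of methods #1/#2 then looks roughly like this (a sketch only; the
execbuf structure holding the object list is assumed, and with the
non-interruptible calls the only error lock_objs() can return is -EALREADY for
a duplicate entry):

	struct ww_acquire_ctx ctx;
	int ret;

	ret = lock_objs(&execbuf->buffers, &ctx);
	if (ret)
		return ret;

	/* ... process the execbuf, all buffers are locked ... */

	unlock_objs(&execbuf->buffers, &ctx);
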
209Method 3 is useful if the list of objects is constructed ad-hoc and not upfront,
210e.g. when adjusting edges in a graph where each node has its own ww_mutex lock,
211and edges can only be changed when holding the locks of all involved nodes. w/w
212mutexes are a natural fit for such a case for two reasons:
213- They can handle lock-acquisition in any order, which allows us to start
214 walking a graph from a starting point and then iteratively discover new edges
215 and lock down the nodes those edges connect to.
216- Due to the -EALREADY return code signalling that a given object is already
217 held, there's no need for additional book-keeping to break cycles in the graph
218 or keep track of which locks are already held (when using more than one node
219 as a starting point).
220
221Note that this approach differs in two important ways from the above methods:
222- Since the list of objects is dynamically constructed (and might very well be
223 different when retrying due to hitting the -EDEADLK wound condition) there's
224 no need to keep any object on a persistent list when it's not locked. We can
225 therefore move the list_head into the object itself.
226- On the other hand the dynamic object list construction also means that the
227 -EALREADY return code can't be propagated.
228
229Note also that method #3 can be combined with methods #1 and #2, e.g. to first
230lock a list of starting nodes (passed in from userspace) using one of the above
231methods, and then lock any additional objects affected by the operations using
232method #3 below. The backoff/retry procedure will be a bit more involved, since
233when the dynamic locking step hits -EDEADLK we also need to unlock all the
234objects acquired with the fixed list. But the w/w mutex debug checks will catch
235any interface misuse for these cases.
236
237Also, method 3 can't fail the lock acquisition step: -EALREADY just means the
238object is already held and is not propagated to the caller. Of course this would
239be different when using the _interruptible variants, but that's outside of the scope of these examples here.
240
241struct obj {
242 struct ww_mutex ww_mutex;
243 struct list_head locked_list;
244};
245
246static DEFINE_WW_CLASS(ww_class);
247
248void __unlock_objs(struct list_head *list)
249{
250 struct obj *entry, *temp;
251
252 list_for_each_entry_safe (entry, temp, list, locked_list) {
253		/* need to do this before unlocking, since only the current lock
254		 * holder is allowed to use the object */
255		list_del(&entry->locked_list);
256		ww_mutex_unlock(&entry->ww_mutex);
257 }
258}
259
260void lock_objs(struct list_head *list, struct ww_acquire_ctx *ctx)
261{
262 struct obj *obj;
	int ret;
263
264 ww_acquire_init(ctx, &ww_class);
265
266retry:
267 /* re-init loop start state */
268 loop {
269 /* magic code which walks over a graph and decides which objects
270 * to lock */
271
272 ret = ww_mutex_lock(&obj->ww_mutex, ctx);
273 if (ret == -EALREADY) {
274 /* we have that one already, get to the next object */
275 continue;
276 }
277 if (ret == -EDEADLK) {
278 __unlock_objs(list);
279
280 ww_mutex_lock_slow(&obj->ww_mutex, ctx);
281 list_add(&obj->locked_list, list);
282 goto retry;
283 }
284
285 /* locked a new object, add it to the list */
286 list_add_tail(&obj->locked_list, list);
287 }
288
289 ww_acquire_done(ctx);
291}
292
293void unlock_objs(struct list_head *list, struct ww_acquire_ctx *ctx)
294{
295 __unlock_objs(list);
296 ww_acquire_fini(ctx);
297}
298
299Method 4: Only lock one single object. In that case deadlock detection and
300prevention is obviously overkill, since with grabbing just one lock you can't
301produce a deadlock within just one class. To simplify this case the w/w mutex
302api can be used with a NULL context.
303
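For completeness, a minimal sketch of method 4, reusing the obj definition from
methods #1/#2; with a NULL context the call can neither return -EDEADLK nor
-EALREADY, so the return value is always 0 and can be ignored:

	ww_mutex_lock(&obj->lock, NULL);
	/* ... use the object ... */
	ww_mutex_unlock(&obj->lock);
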
304Implementation Details
305----------------------
306
307Design:
308 ww_mutex currently encapsulates a struct mutex; this means no extra overhead for
309 normal mutex locks, which are far more common. As such there is only a small
310 increase in code size if wait/wound mutexes are not used.
311
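Concretely, struct ww_mutex as added by this patch just wraps a regular mutex
together with a pointer to the acquire context of the current holder (plus a
debug-only back-pointer to its class):

struct ww_mutex {
	struct mutex base;
	struct ww_acquire_ctx *ctx;
#ifdef CONFIG_DEBUG_MUTEXES
	struct ww_class *ww_class;
#endif
};
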
312 In general, not much contention is expected. The locks are typically used to
313 serialize access to resources for devices. Making the wakeups smarter would
314 come at the cost of adding a field to struct mutex_waiter, which would add
315 overhead to all cases where normal mutexes are used, while ww_mutexes are
316 generally less performance sensitive.
317
318Lockdep:
319 Special care has been taken to warn for as many cases of api abuse
320 as possible. Some common api abuses will be caught with
321 CONFIG_DEBUG_MUTEXES, but CONFIG_PROVE_LOCKING is recommended.
322
323 Some of the errors which will be warned about:
324 - Forgetting to call ww_acquire_fini or ww_acquire_init.
325 - Attempting to lock more mutexes after ww_acquire_done.
326 - Attempting to lock the wrong mutex after -EDEADLK and
327 unlocking all mutexes.
328 - Attempting to lock the right mutex after -EDEADLK,
329 before unlocking all mutexes.
331 - Calling ww_mutex_lock_slow before -EDEADLK was returned.
333 - Unlocking mutexes with the wrong unlock function.
334 - Calling one of the ww_acquire_* twice on the same context.
335 - Using a different ww_class for the mutex than for the ww_acquire_ctx.
336 - Normal lockdep errors that can result in deadlocks.
337
338 Some of the lockdep errors that can result in deadlocks:
339 - Calling ww_acquire_init to initialize a second ww_acquire_ctx before
340 having called ww_acquire_fini on the first.
341 - 'normal' deadlocks that can occur.
342
343FIXME: Update this section once we have the TASK_DEADLOCK task state flag magic
344implemented.
diff --git a/include/linux/mutex-debug.h b/include/linux/mutex-debug.h
index 731d77d6e155..4ac8b1977b73 100644
--- a/include/linux/mutex-debug.h
+++ b/include/linux/mutex-debug.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/linkage.h> 4#include <linux/linkage.h>
5#include <linux/lockdep.h> 5#include <linux/lockdep.h>
6#include <linux/debug_locks.h>
6 7
7/* 8/*
8 * Mutexes - debugging helpers: 9 * Mutexes - debugging helpers:
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 433da8a1a426..a56b0ccc8a6c 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -10,6 +10,7 @@
10#ifndef __LINUX_MUTEX_H 10#ifndef __LINUX_MUTEX_H
11#define __LINUX_MUTEX_H 11#define __LINUX_MUTEX_H
12 12
13#include <asm/current.h>
13#include <linux/list.h> 14#include <linux/list.h>
14#include <linux/spinlock_types.h> 15#include <linux/spinlock_types.h>
15#include <linux/linkage.h> 16#include <linux/linkage.h>
@@ -77,6 +78,36 @@ struct mutex_waiter {
77#endif 78#endif
78}; 79};
79 80
81struct ww_class {
82 atomic_long_t stamp;
83 struct lock_class_key acquire_key;
84 struct lock_class_key mutex_key;
85 const char *acquire_name;
86 const char *mutex_name;
87};
88
89struct ww_acquire_ctx {
90 struct task_struct *task;
91 unsigned long stamp;
92 unsigned acquired;
93#ifdef CONFIG_DEBUG_MUTEXES
94 unsigned done_acquire;
95 struct ww_class *ww_class;
96 struct ww_mutex *contending_lock;
97#endif
98#ifdef CONFIG_DEBUG_LOCK_ALLOC
99 struct lockdep_map dep_map;
100#endif
101};
102
103struct ww_mutex {
104 struct mutex base;
105 struct ww_acquire_ctx *ctx;
106#ifdef CONFIG_DEBUG_MUTEXES
107 struct ww_class *ww_class;
108#endif
109};
110
80#ifdef CONFIG_DEBUG_MUTEXES 111#ifdef CONFIG_DEBUG_MUTEXES
81# include <linux/mutex-debug.h> 112# include <linux/mutex-debug.h>
82#else 113#else
@@ -101,8 +132,11 @@ static inline void mutex_destroy(struct mutex *lock) {}
101#ifdef CONFIG_DEBUG_LOCK_ALLOC 132#ifdef CONFIG_DEBUG_LOCK_ALLOC
102# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ 133# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
103 , .dep_map = { .name = #lockname } 134 , .dep_map = { .name = #lockname }
135# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class) \
136 , .ww_class = &ww_class
104#else 137#else
105# define __DEP_MAP_MUTEX_INITIALIZER(lockname) 138# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
139# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class)
106#endif 140#endif
107 141
108#define __MUTEX_INITIALIZER(lockname) \ 142#define __MUTEX_INITIALIZER(lockname) \
@@ -112,13 +146,49 @@ static inline void mutex_destroy(struct mutex *lock) {}
112 __DEBUG_MUTEX_INITIALIZER(lockname) \ 146 __DEBUG_MUTEX_INITIALIZER(lockname) \
113 __DEP_MAP_MUTEX_INITIALIZER(lockname) } 147 __DEP_MAP_MUTEX_INITIALIZER(lockname) }
114 148
149#define __WW_CLASS_INITIALIZER(ww_class) \
150 { .stamp = ATOMIC_LONG_INIT(0) \
151 , .acquire_name = #ww_class "_acquire" \
152 , .mutex_name = #ww_class "_mutex" }
153
154#define __WW_MUTEX_INITIALIZER(lockname, class) \
155 { .base = { \__MUTEX_INITIALIZER(lockname) } \
156 __WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
157
115#define DEFINE_MUTEX(mutexname) \ 158#define DEFINE_MUTEX(mutexname) \
116 struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) 159 struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
117 160
161#define DEFINE_WW_CLASS(classname) \
162 struct ww_class classname = __WW_CLASS_INITIALIZER(classname)
163
164#define DEFINE_WW_MUTEX(mutexname, ww_class) \
165 struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class)
166
167
118extern void __mutex_init(struct mutex *lock, const char *name, 168extern void __mutex_init(struct mutex *lock, const char *name,
119 struct lock_class_key *key); 169 struct lock_class_key *key);
120 170
121/** 171/**
172 * ww_mutex_init - initialize the w/w mutex
173 * @lock: the mutex to be initialized
174 * @ww_class: the w/w class the mutex should belong to
175 *
176 * Initialize the w/w mutex to unlocked state and associate it with the given
177 * class.
178 *
179 * It is not allowed to initialize an already locked mutex.
180 */
181static inline void ww_mutex_init(struct ww_mutex *lock,
182 struct ww_class *ww_class)
183{
184 __mutex_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key);
185 lock->ctx = NULL;
186#ifdef CONFIG_DEBUG_MUTEXES
187 lock->ww_class = ww_class;
188#endif
189}
190
191/**
122 * mutex_is_locked - is the mutex locked 192 * mutex_is_locked - is the mutex locked
123 * @lock: the mutex to be queried 193 * @lock: the mutex to be queried
124 * 194 *
@@ -136,6 +206,7 @@ static inline int mutex_is_locked(struct mutex *lock)
136#ifdef CONFIG_DEBUG_LOCK_ALLOC 206#ifdef CONFIG_DEBUG_LOCK_ALLOC
137extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass); 207extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
138extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock); 208extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
209
139extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock, 210extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
140 unsigned int subclass); 211 unsigned int subclass);
141extern int __must_check mutex_lock_killable_nested(struct mutex *lock, 212extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
@@ -147,7 +218,7 @@ extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
147 218
148#define mutex_lock_nest_lock(lock, nest_lock) \ 219#define mutex_lock_nest_lock(lock, nest_lock) \
149do { \ 220do { \
150 typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ 221 typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
151 _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \ 222 _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
152} while (0) 223} while (0)
153 224
@@ -170,6 +241,288 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
170 */ 241 */
171extern int mutex_trylock(struct mutex *lock); 242extern int mutex_trylock(struct mutex *lock);
172extern void mutex_unlock(struct mutex *lock); 243extern void mutex_unlock(struct mutex *lock);
244
245/**
246 * ww_acquire_init - initialize a w/w acquire context
247 * @ctx: w/w acquire context to initialize
248 * @ww_class: w/w class of the context
249 *
250 * Initializes a context to acquire multiple mutexes of the given w/w class.
251 *
252 * Context-based w/w mutex acquiring can be done in any order whatsoever within
253 * a given lock class. Deadlocks will be detected and handled with the
254 * wait/wound logic.
255 *
256 * Mixing of context-based w/w mutex acquiring and single w/w mutex locking can
257 * result in undetected deadlocks and is hence forbidden. Mixing different contexts
258 * for the same w/w class when acquiring mutexes can also result in undetected
259 * deadlocks, and is hence also forbidden. Both types of abuse will be caught by
260 * enabling CONFIG_PROVE_LOCKING.
261 *
262 * Nesting of acquire contexts for _different_ w/w classes is possible, subject
263 * to the usual locking rules between different lock classes.
264 *
265 * An acquire context must be released with ww_acquire_fini by the same task
266 * before the memory is freed. It is recommended to allocate the context itself
267 * on the stack.
268 */
269static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
270 struct ww_class *ww_class)
271{
272 ctx->task = current;
273 ctx->stamp = atomic_long_inc_return(&ww_class->stamp);
274 ctx->acquired = 0;
275#ifdef CONFIG_DEBUG_MUTEXES
276 ctx->ww_class = ww_class;
277 ctx->done_acquire = 0;
278 ctx->contending_lock = NULL;
279#endif
280#ifdef CONFIG_DEBUG_LOCK_ALLOC
281 debug_check_no_locks_freed((void *)ctx, sizeof(*ctx));
282 lockdep_init_map(&ctx->dep_map, ww_class->acquire_name,
283 &ww_class->acquire_key, 0);
284 mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_);
285#endif
286}
287
288/**
289 * ww_acquire_done - marks the end of the acquire phase
290 * @ctx: the acquire context
291 *
292 * Marks the end of the acquire phase; any further w/w mutex lock calls using
293 * this context are forbidden.
294 *
295 * Calling this function is optional, it is just useful to document w/w mutex
296 * code and clearly separate the acquire phase from actually using the locked
297 * data structures.
298 */
299static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
300{
301#ifdef CONFIG_DEBUG_MUTEXES
302 lockdep_assert_held(ctx);
303
304 DEBUG_LOCKS_WARN_ON(ctx->done_acquire);
305 ctx->done_acquire = 1;
306#endif
307}
308
309/**
310 * ww_acquire_fini - releases a w/w acquire context
311 * @ctx: the acquire context to free
312 *
313 * Releases a w/w acquire context. This must be called _after_ all acquired w/w
314 * mutexes have been released with ww_mutex_unlock.
315 */
316static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
317{
318#ifdef CONFIG_DEBUG_MUTEXES
319 mutex_release(&ctx->dep_map, 0, _THIS_IP_);
320
321 DEBUG_LOCKS_WARN_ON(ctx->acquired);
322 if (!config_enabled(CONFIG_PROVE_LOCKING))
323 /*
324 * lockdep will normally handle this,
325 * but fail without anyway
326 */
327 ctx->done_acquire = 1;
328
329 if (!config_enabled(CONFIG_DEBUG_LOCK_ALLOC))
330 /* ensure ww_acquire_fini will still fail if called twice */
331 ctx->acquired = ~0U;
332#endif
333}
334
335extern int __must_check __ww_mutex_lock(struct ww_mutex *lock,
336 struct ww_acquire_ctx *ctx);
337extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock,
338 struct ww_acquire_ctx *ctx);
339
340/**
341 * ww_mutex_lock - acquire the w/w mutex
342 * @lock: the mutex to be acquired
343 * @ctx: w/w acquire context, or NULL to acquire only a single lock.
344 *
345 * Lock the w/w mutex exclusively for this task.
346 *
347 * Deadlocks within a given w/w class of locks are detected and handled with the
348 * wait/wound algorithm. If the lock isn't immediately available this function
349 * will either sleep until it is (wait case), or it selects the current context
350 * for backing off by returning -EDEADLK (wound case). Trying to acquire the
351 * same lock with the same context twice is also detected and signalled by
352 * returning -EALREADY. Returns 0 if the mutex was successfully acquired.
353 *
354 * In the wound case the caller must release all currently held w/w mutexes for
355 * the given context and then wait for this contending lock to be available by
356 * calling ww_mutex_lock_slow. Alternatively callers can opt to not acquire this
357 * lock and proceed with trying to acquire further w/w mutexes (e.g. when
358 * scanning through lru lists trying to free resources).
359 *
360 * The mutex must later on be released by the same task that
361 * acquired it. The task may not exit without first unlocking the mutex. Also,
362 * kernel memory where the mutex resides must not be freed with the mutex still
363 * locked. The mutex must first be initialized (or statically defined) before it
364 * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
365 * of the same w/w lock class as was used to initialize the acquire context.
366 *
367 * A mutex acquired with this function must be released with ww_mutex_unlock.
368 */
369static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
370{
371 if (ctx)
372 return __ww_mutex_lock(lock, ctx);
373 else {
374 mutex_lock(&lock->base);
375 return 0;
376 }
377}
378
379/**
380 * ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible
381 * @lock: the mutex to be acquired
382 * @ctx: w/w acquire context
383 *
384 * Lock the w/w mutex exclusively for this task.
385 *
386 * Deadlocks within a given w/w class of locks are detected and handled with the
387 * wait/wound algorithm. If the lock isn't immediately available this function
388 * will either sleep until it is (wait case), or it selects the current context
389 * for backing off by returning -EDEADLK (wound case). Trying to acquire the
390 * same lock with the same context twice is also detected and signalled by
391 * returning -EALREADY. Returns 0 if the mutex was successfully acquired. If a
392 * signal arrives while waiting for the lock then this function returns -EINTR.
393 *
394 * In the wound case the caller must release all currently held w/w mutexes for
395 * the given context and then wait for this contending lock to be available by
396 * calling ww_mutex_lock_slow_interruptible. Alternatively callers can opt to
397 * not acquire this lock and proceed with trying to acquire further w/w mutexes
398 * (e.g. when scanning through lru lists trying to free resources).
399 *
400 * The mutex must later on be released by the same task that
401 * acquired it. The task may not exit without first unlocking the mutex. Also,
402 * kernel memory where the mutex resides must not be freed with the mutex still
403 * locked. The mutex must first be initialized (or statically defined) before it
404 * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
405 * of the same w/w lock class as was used to initialize the acquire context.
406 *
407 * A mutex acquired with this function must be released with ww_mutex_unlock.
408 */
409static inline int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
410 struct ww_acquire_ctx *ctx)
411{
412 if (ctx)
413 return __ww_mutex_lock_interruptible(lock, ctx);
414 else
415 return mutex_lock_interruptible(&lock->base);
416}
417
418/**
419 * ww_mutex_lock_slow - slowpath acquiring of the w/w mutex
420 * @lock: the mutex to be acquired
421 * @ctx: w/w acquire context
422 *
423 * Acquires a w/w mutex with the given context after a wound case. This function
424 * will sleep until the lock becomes available.
425 *
426 * The caller must have released all w/w mutexes already acquired with the
427 * context and then call this function on the contended lock.
428 *
429 * Afterwards the caller may continue to (re)acquire the other w/w mutexes it
430 * needs with ww_mutex_lock. Note that the -EALREADY return code from
431 * ww_mutex_lock can be used to avoid locking this contended mutex twice.
432 *
433 * It is forbidden to call this function with any other w/w mutexes associated
434 * with the context held. It is forbidden to call this on anything else than the
435 * contending mutex.
436 *
437 * Note that the slowpath lock acquiring can also be done by calling
438 * ww_mutex_lock directly. This function here is simply to help w/w mutex
439 * locking code readability by clearly denoting the slowpath.
440 */
441static inline void
442ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
443{
444 int ret;
445#ifdef CONFIG_DEBUG_MUTEXES
446 DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
447#endif
448 ret = ww_mutex_lock(lock, ctx);
449 (void)ret;
450}
451
452/**
453 * ww_mutex_lock_slow_interruptible - slowpath acquiring of the w/w mutex,
454 * interruptible
455 * @lock: the mutex to be acquired
456 * @ctx: w/w acquire context
457 *
458 * Acquires a w/w mutex with the given context after a wound case. This function
459 * will sleep until the lock becomes available and returns 0 when the lock has
460 * been acquired. If a signal arrives while waiting for the lock then this
461 * function returns -EINTR.
462 *
463 * The caller must have released all w/w mutexes already acquired with the
464 * context and then call this function on the contended lock.
465 *
466 * Afterwards the caller may continue to (re)acquire the other w/w mutexes it
467 * needs with ww_mutex_lock. Note that the -EALREADY return code from
468 * ww_mutex_lock can be used to avoid locking this contended mutex twice.
469 *
470 * It is forbidden to call this function with any other w/w mutexes associated
471 * with the given context held. It is forbidden to call this on anything else
472 * than the contending mutex.
473 *
474 * Note that the slowpath lock acquiring can also be done by calling
475 * ww_mutex_lock_interruptible directly. This function here is simply to help
476 * w/w mutex locking code readability by clearly denoting the slowpath.
477 */
478static inline int __must_check
479ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
480 struct ww_acquire_ctx *ctx)
481{
482#ifdef CONFIG_DEBUG_MUTEXES
483 DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
484#endif
485 return ww_mutex_lock_interruptible(lock, ctx);
486}
487
488extern void ww_mutex_unlock(struct ww_mutex *lock);
489
490/**
491 * ww_mutex_trylock - tries to acquire the w/w mutex without acquire context
492 * @lock: mutex to lock
493 *
494 * Trylocks a mutex without acquire context, so no deadlock detection is
495 * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
496 */
497static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock)
498{
499 return mutex_trylock(&lock->base);
500}
501
502/***
503 * ww_mutex_destroy - mark a w/w mutex unusable
504 * @lock: the mutex to be destroyed
505 *
506 * This function marks the mutex uninitialized, and any subsequent
507 * use of the mutex is forbidden. The mutex must not be locked when
508 * this function is called.
509 */
510static inline void ww_mutex_destroy(struct ww_mutex *lock)
511{
512 mutex_destroy(&lock->base);
513}
514
515/**
516 * ww_mutex_is_locked - is the w/w mutex locked
517 * @lock: the mutex to be queried
518 *
519 * Returns 1 if the mutex is locked, 0 if unlocked.
520 */
521static inline bool ww_mutex_is_locked(struct ww_mutex *lock)
522{
523 return mutex_is_locked(&lock->base);
524}
525
173extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); 526extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
174 527
175#ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX 528#ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 42f8dda2467b..fc801aafe8fd 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -254,16 +254,165 @@ void __sched mutex_unlock(struct mutex *lock)
254 254
255EXPORT_SYMBOL(mutex_unlock); 255EXPORT_SYMBOL(mutex_unlock);
256 256
257/**
258 * ww_mutex_unlock - release the w/w mutex
259 * @lock: the mutex to be released
260 *
261 * Unlock a mutex that has been locked by this task previously with any of the
262 * ww_mutex_lock* functions (with or without an acquire context). It is
263 * forbidden to release the locks after releasing the acquire context.
264 *
265 * This function must not be used in interrupt context. Unlocking
266 * of an unlocked mutex is not allowed.
267 */
268void __sched ww_mutex_unlock(struct ww_mutex *lock)
269{
270 /*
271 * The unlocking fastpath is the 0->1 transition from 'locked'
272 * into 'unlocked' state:
273 */
274 if (lock->ctx) {
275#ifdef CONFIG_DEBUG_MUTEXES
276 DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
277#endif
278 if (lock->ctx->acquired > 0)
279 lock->ctx->acquired--;
280 lock->ctx = NULL;
281 }
282
283#ifndef CONFIG_DEBUG_MUTEXES
284 /*
285 * When debugging is enabled we must not clear the owner before time,
286 * the slow path will always be taken, and that clears the owner field
287 * after verifying that it was indeed current.
288 */
289 mutex_clear_owner(&lock->base);
290#endif
291 __mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
292}
293EXPORT_SYMBOL(ww_mutex_unlock);
294
295static inline int __sched
296__mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
297{
298 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
299 struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
300
301 if (!hold_ctx)
302 return 0;
303
304 if (unlikely(ctx == hold_ctx))
305 return -EALREADY;
306
307 if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
308 (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
309#ifdef CONFIG_DEBUG_MUTEXES
310 DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
311 ctx->contending_lock = ww;
312#endif
313 return -EDEADLK;
314 }
315
316 return 0;
317}
318
319static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
320 struct ww_acquire_ctx *ww_ctx)
321{
322#ifdef CONFIG_DEBUG_MUTEXES
323 /*
324 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
325 * but released with a normal mutex_unlock in this call.
326 *
327 * This should never happen, always use ww_mutex_unlock.
328 */
329 DEBUG_LOCKS_WARN_ON(ww->ctx);
330
331 /*
332 * Not quite done after calling ww_acquire_done() ?
333 */
334 DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
335
336 if (ww_ctx->contending_lock) {
337 /*
338 * After -EDEADLK you tried to
339 * acquire a different ww_mutex? Bad!
340 */
341 DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
342
343 /*
344 * You called ww_mutex_lock after receiving -EDEADLK,
345 * but 'forgot' to unlock everything else first?
346 */
347 DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
348 ww_ctx->contending_lock = NULL;
349 }
350
351 /*
352 * Naughty, using a different class will lead to undefined behavior!
353 */
354 DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
355#endif
356 ww_ctx->acquired++;
357}
358
359/*
360 * after acquiring lock with fastpath or when we lost out in contested
361 * slowpath, set ctx and wake up any waiters so they can recheck.
362 *
363 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
364 * as the fastpath and opportunistic spinning are disabled in that case.
365 */
366static __always_inline void
367ww_mutex_set_context_fastpath(struct ww_mutex *lock,
368 struct ww_acquire_ctx *ctx)
369{
370 unsigned long flags;
371 struct mutex_waiter *cur;
372
373 ww_mutex_lock_acquired(lock, ctx);
374
375 lock->ctx = ctx;
376
377 /*
378 * The lock->ctx update should be visible on all cores before
379 * the atomic read is done, otherwise contended waiters might be
380 * missed. The contended waiters will either see ww_ctx == NULL
381 * and keep spinning, or they will acquire wait_lock, add themselves
382 * to the waiter list and sleep.
383 */
384 smp_mb(); /* ^^^ */
385
386 /*
387 * Check if lock is contended, if not there is nobody to wake up
388 */
389 if (likely(atomic_read(&lock->base.count) == 0))
390 return;
391
392 /*
393 * Uh oh, we raced in fastpath, wake up everyone in this case,
394 * so they can see the new lock->ctx.
395 */
396 spin_lock_mutex(&lock->base.wait_lock, flags);
397 list_for_each_entry(cur, &lock->base.wait_list, list) {
398 debug_mutex_wake_waiter(&lock->base, cur);
399 wake_up_process(cur->task);
400 }
401 spin_unlock_mutex(&lock->base.wait_lock, flags);
402}
403
257/* 404/*
258 * Lock a mutex (possibly interruptible), slowpath: 405 * Lock a mutex (possibly interruptible), slowpath:
259 */ 406 */
260static inline int __sched 407static __always_inline int __sched
261__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, 408__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
262 struct lockdep_map *nest_lock, unsigned long ip) 409 struct lockdep_map *nest_lock, unsigned long ip,
410 struct ww_acquire_ctx *ww_ctx)
263{ 411{
264 struct task_struct *task = current; 412 struct task_struct *task = current;
265 struct mutex_waiter waiter; 413 struct mutex_waiter waiter;
266 unsigned long flags; 414 unsigned long flags;
415 int ret;
267 416
268 preempt_disable(); 417 preempt_disable();
269 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip); 418 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
@@ -298,6 +447,22 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
298 struct task_struct *owner; 447 struct task_struct *owner;
299 struct mspin_node node; 448 struct mspin_node node;
300 449
450 if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
451 struct ww_mutex *ww;
452
453 ww = container_of(lock, struct ww_mutex, base);
454 /*
455 * If ww->ctx is set the contents are undefined, only
456 * by acquiring wait_lock there is a guarantee that
457 * they are not invalid when reading.
458 *
459 * As such, when deadlock detection needs to be
460 * performed the optimistic spinning cannot be done.
461 */
462 if (ACCESS_ONCE(ww->ctx))
463 break;
464 }
465
301 /* 466 /*
302 * If there's an owner, wait for it to either 467 * If there's an owner, wait for it to either
303 * release the lock or go to sleep. 468 * release the lock or go to sleep.
@@ -312,6 +477,13 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
312 if ((atomic_read(&lock->count) == 1) && 477 if ((atomic_read(&lock->count) == 1) &&
313 (atomic_cmpxchg(&lock->count, 1, 0) == 1)) { 478 (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
314 lock_acquired(&lock->dep_map, ip); 479 lock_acquired(&lock->dep_map, ip);
480 if (!__builtin_constant_p(ww_ctx == NULL)) {
481 struct ww_mutex *ww;
482 ww = container_of(lock, struct ww_mutex, base);
483
484 ww_mutex_set_context_fastpath(ww, ww_ctx);
485 }
486
315 mutex_set_owner(lock); 487 mutex_set_owner(lock);
316 mspin_unlock(MLOCK(lock), &node); 488 mspin_unlock(MLOCK(lock), &node);
317 preempt_enable(); 489 preempt_enable();
@@ -371,15 +543,16 @@ slowpath:
371 * TASK_UNINTERRUPTIBLE case.) 543 * TASK_UNINTERRUPTIBLE case.)
372 */ 544 */
373 if (unlikely(signal_pending_state(state, task))) { 545 if (unlikely(signal_pending_state(state, task))) {
374 mutex_remove_waiter(lock, &waiter, 546 ret = -EINTR;
375 task_thread_info(task)); 547 goto err;
376 mutex_release(&lock->dep_map, 1, ip); 548 }
377 spin_unlock_mutex(&lock->wait_lock, flags);
378 549
379 debug_mutex_free_waiter(&waiter); 550 if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
380 preempt_enable(); 551 ret = __mutex_lock_check_stamp(lock, ww_ctx);
381 return -EINTR; 552 if (ret)
553 goto err;
382 } 554 }
555
383 __set_task_state(task, state); 556 __set_task_state(task, state);
384 557
385 /* didn't get the lock, go to sleep: */ 558 /* didn't get the lock, go to sleep: */
@@ -394,6 +567,30 @@ done:
394 mutex_remove_waiter(lock, &waiter, current_thread_info()); 567 mutex_remove_waiter(lock, &waiter, current_thread_info());
395 mutex_set_owner(lock); 568 mutex_set_owner(lock);
396 569
570 if (!__builtin_constant_p(ww_ctx == NULL)) {
571 struct ww_mutex *ww = container_of(lock,
572 struct ww_mutex,
573 base);
574 struct mutex_waiter *cur;
575
576 /*
577 * This branch gets optimized out for the common case,
578 * and is only important for ww_mutex_lock.
579 */
580
581 ww_mutex_lock_acquired(ww, ww_ctx);
582 ww->ctx = ww_ctx;
583
584 /*
585 * Give any possible sleeping processes the chance to wake up,
586 * so they can recheck if they have to back off.
587 */
588 list_for_each_entry(cur, &lock->wait_list, list) {
589 debug_mutex_wake_waiter(lock, cur);
590 wake_up_process(cur->task);
591 }
592 }
593
397 /* set it to 0 if there are no waiters left: */ 594 /* set it to 0 if there are no waiters left: */
398 if (likely(list_empty(&lock->wait_list))) 595 if (likely(list_empty(&lock->wait_list)))
399 atomic_set(&lock->count, 0); 596 atomic_set(&lock->count, 0);
@@ -404,6 +601,14 @@ done:
404 preempt_enable(); 601 preempt_enable();
405 602
406 return 0; 603 return 0;
604
605err:
606 mutex_remove_waiter(lock, &waiter, task_thread_info(task));
607 spin_unlock_mutex(&lock->wait_lock, flags);
608 debug_mutex_free_waiter(&waiter);
609 mutex_release(&lock->dep_map, 1, ip);
610 preempt_enable();
611 return ret;
407} 612}
408 613
409#ifdef CONFIG_DEBUG_LOCK_ALLOC 614#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -411,7 +616,8 @@ void __sched
411mutex_lock_nested(struct mutex *lock, unsigned int subclass) 616mutex_lock_nested(struct mutex *lock, unsigned int subclass)
412{ 617{
413 might_sleep(); 618 might_sleep();
414 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_); 619 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
620 subclass, NULL, _RET_IP_, NULL);
415} 621}
416 622
417EXPORT_SYMBOL_GPL(mutex_lock_nested); 623EXPORT_SYMBOL_GPL(mutex_lock_nested);
@@ -420,7 +626,8 @@ void __sched
420_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest) 626_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
421{ 627{
422 might_sleep(); 628 might_sleep();
423 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_); 629 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
630 0, nest, _RET_IP_, NULL);
424} 631}
425 632
426EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock); 633EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
@@ -429,7 +636,8 @@ int __sched
429mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass) 636mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
430{ 637{
431 might_sleep(); 638 might_sleep();
432 return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_); 639 return __mutex_lock_common(lock, TASK_KILLABLE,
640 subclass, NULL, _RET_IP_, NULL);
433} 641}
434EXPORT_SYMBOL_GPL(mutex_lock_killable_nested); 642EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
435 643
@@ -438,10 +646,30 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
438{ 646{
439 might_sleep(); 647 might_sleep();
440 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 648 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
441 subclass, NULL, _RET_IP_); 649 subclass, NULL, _RET_IP_, NULL);
442} 650}
443 651
444EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested); 652EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
653
654
655int __sched
656__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
657{
658 might_sleep();
659 return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
660 0, &ctx->dep_map, _RET_IP_, ctx);
661}
662EXPORT_SYMBOL_GPL(__ww_mutex_lock);
663
664int __sched
665__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
666{
667 might_sleep();
668 return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
669 0, &ctx->dep_map, _RET_IP_, ctx);
670}
671EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
672
445#endif 673#endif
446 674
447/* 675/*
@@ -544,20 +772,39 @@ __mutex_lock_slowpath(atomic_t *lock_count)
544{ 772{
545 struct mutex *lock = container_of(lock_count, struct mutex, count); 773 struct mutex *lock = container_of(lock_count, struct mutex, count);
546 774
547 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_); 775 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
776 NULL, _RET_IP_, NULL);
548} 777}
549 778
550static noinline int __sched 779static noinline int __sched
551__mutex_lock_killable_slowpath(struct mutex *lock) 780__mutex_lock_killable_slowpath(struct mutex *lock)
552{ 781{
553 return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_); 782 return __mutex_lock_common(lock, TASK_KILLABLE, 0,
783 NULL, _RET_IP_, NULL);
554} 784}
555 785
556static noinline int __sched 786static noinline int __sched
557__mutex_lock_interruptible_slowpath(struct mutex *lock) 787__mutex_lock_interruptible_slowpath(struct mutex *lock)
558{ 788{
559 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_); 789 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
790 NULL, _RET_IP_, NULL);
791}
792
793static noinline int __sched
794__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
795{
796 return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
797 NULL, _RET_IP_, ctx);
560} 798}
799
800static noinline int __sched
801__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
802 struct ww_acquire_ctx *ctx)
803{
804 return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
805 NULL, _RET_IP_, ctx);
806}
807
561#endif 808#endif
562 809
563/* 810/*
@@ -613,6 +860,45 @@ int __sched mutex_trylock(struct mutex *lock)
613} 860}
614EXPORT_SYMBOL(mutex_trylock); 861EXPORT_SYMBOL(mutex_trylock);
615 862
863#ifndef CONFIG_DEBUG_LOCK_ALLOC
864int __sched
865__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
866{
867 int ret;
868
869 might_sleep();
870
871 ret = __mutex_fastpath_lock_retval(&lock->base.count);
872
873 if (likely(!ret)) {
874 ww_mutex_set_context_fastpath(lock, ctx);
875 mutex_set_owner(&lock->base);
876 } else
877 ret = __ww_mutex_lock_slowpath(lock, ctx);
878 return ret;
879}
880EXPORT_SYMBOL(__ww_mutex_lock);
881
882int __sched
883__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
884{
885 int ret;
886
887 might_sleep();
888
889 ret = __mutex_fastpath_lock_retval(&lock->base.count);
890
891 if (likely(!ret)) {
892 ww_mutex_set_context_fastpath(lock, ctx);
893 mutex_set_owner(&lock->base);
894 } else
895 ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
896 return ret;
897}
898EXPORT_SYMBOL(__ww_mutex_lock_interruptible);
899
900#endif
901
616/** 902/**
617 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0 903 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
618 * @cnt: the atomic which we are to dec 904 * @cnt: the atomic which we are to dec
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index f2fa60c59343..96c4c633d95e 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -30,6 +30,7 @@ EXPORT_SYMBOL_GPL(debug_locks);
30 * a locking bug is detected. 30 * a locking bug is detected.
31 */ 31 */
32int debug_locks_silent; 32int debug_locks_silent;
33EXPORT_SYMBOL_GPL(debug_locks_silent);
33 34
34/* 35/*
35 * Generic 'turn off all lock debugging' function: 36 * Generic 'turn off all lock debugging' function:
@@ -44,3 +45,4 @@ int debug_locks_off(void)
44 } 45 }
45 return 0; 46 return 0;
46} 47}
48EXPORT_SYMBOL_GPL(debug_locks_off);