author		Dave Airlie <airlied@redhat.com>	2013-06-27 06:42:09 -0400
committer	Dave Airlie <airlied@redhat.com>	2013-06-27 06:42:09 -0400
commit		dc0216445cdc5b937b78a02f1145a2fbcf92e9d6 (patch)
tree		eff2e326fe6369a948f57f2991232b97f08384b9
parent		4300a0f8bdcce5a03b88bfa16fc9827e15c52dc4 (diff)
parent		166989e366ffa66108b2f37b870e66b85b2185ad (diff)
Merge branch 'core/mutexes' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into drm-next
Merge in the tip core/mutexes branch for future GPU driver use. Ingo will
send this branch to Linus prior to drm-next.

* 'core/mutexes' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
  locking-selftests: Handle unexpected failures more strictly
  mutex: Add more w/w tests to test EDEADLK path handling
  mutex: Add more tests to lib/locking-selftest.c
  mutex: Add w/w tests to lib/locking-selftest.c
  mutex: Add w/w mutex slowpath debugging
  mutex: Add support for wound/wait style locks
  arch: Make __mutex_fastpath_lock_retval return whether fastpath succeeded or not
  powerpc/pci: Fix boot panic on mpc83xx (regression)
  s390/ipl: Fix FCP WWPN and LUN format strings for read
  fs: fix new splice.c kernel-doc warning
  spi/pxa2xx: fix memory corruption due to wrong size used in devm_kzalloc()
  s390/mem_detect: fix memory hole handling
  s390/dma: support debug_dma_mapping_error
  s390/dma: fix mapping_error detection
  s390/irq: Only define synchronize_irq() on SMP
  Input: xpad - fix for "Mad Catz Street Fighter IV FightPad" controllers
  Input: wacom - add a new stylus (0x100802) for Intuos5 and Cintiqs
  spi/pxa2xx: use GFP_ATOMIC in sg table allocation
  fuse: hold i_mutex in fuse_file_fallocate()
  Input: add missing dependencies on CONFIG_HAS_IOMEM
  ...
 Documentation/ww-mutex-design.txt       | 344
 arch/ia64/include/asm/mutex.h           |  10
 arch/powerpc/include/asm/mutex.h        |  10
 arch/powerpc/sysdev/fsl_pci.c           |  24
 arch/s390/include/asm/dma-mapping.h     |   3
 arch/s390/kernel/ipl.c                  |   8
 arch/s390/kernel/irq.c                  |   2
 arch/s390/mm/mem_detect.c               |   3
 arch/sh/include/asm/mutex-llsc.h        |   4
 arch/x86/include/asm/mutex_32.h         |  11
 arch/x86/include/asm/mutex_64.h         |  11
 drivers/input/joystick/xpad.c           |   2
 drivers/input/keyboard/Kconfig          |   1
 drivers/input/serio/Kconfig             |   1
 drivers/input/tablet/wacom_wac.c        |   2
 drivers/input/touchscreen/cyttsp_core.c |  28
 drivers/input/touchscreen/cyttsp_core.h |   2
 drivers/spi/spi-pxa2xx-dma.c            |   2
 drivers/spi/spi-pxa2xx.c                |   2
 drivers/spi/spi-s3c64xx.c               |   2
 fs/fuse/file.c                          |  12
 fs/splice.c                             |   1
 include/asm-generic/mutex-dec.h         |  10
 include/asm-generic/mutex-null.h        |   2
 include/asm-generic/mutex-xchg.h        |  10
 include/linux/mutex-debug.h             |   1
 include/linux/mutex.h                   | 363
 kernel/mutex.c                          | 384
 lib/Kconfig.debug                       |  13
 lib/debug_locks.c                       |   2
 lib/locking-selftest.c                  | 720
 31 files changed, 1861 insertions(+), 129 deletions(-)
diff --git a/Documentation/ww-mutex-design.txt b/Documentation/ww-mutex-design.txt
new file mode 100644
index 000000000000..8a112dc304c3
--- /dev/null
+++ b/Documentation/ww-mutex-design.txt
@@ -0,0 +1,344 @@
Wait/Wound Deadlock-Proof Mutex Design
======================================

Please read mutex-design.txt first, as it applies to wait/wound mutexes too.

Motivation for WW-Mutexes
-------------------------

GPUs do operations that commonly involve many buffers. Those buffers
can be shared across contexts/processes, exist in different memory
domains (for example VRAM vs system memory), and so on. And with
PRIME / dmabuf, they can even be shared across devices. So there are
a handful of situations where the driver needs to wait for buffers to
become ready. If you think about this in terms of waiting on a buffer
mutex for it to become available, this presents a problem because
there is no way to guarantee that buffers appear in an execbuf/batch in
the same order in all contexts. That is directly under control of
userspace, and a result of the sequence of GL calls that an application
makes, which results in the potential for deadlock. The problem gets
more complex when you consider that the kernel may need to migrate the
buffer(s) into VRAM before the GPU operates on the buffer(s), which
may in turn require evicting some other buffers (and you don't want to
evict other buffers which are already queued up to the GPU), but for a
simplified understanding of the problem you can ignore this.

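To make the ordering problem concrete, here is the classic two-task deadlock
with plain mutexes (the buffer names are illustrative):

	task 1				task 2
	mutex_lock(&bufA->lock);	mutex_lock(&bufB->lock);
	mutex_lock(&bufB->lock);	mutex_lock(&bufA->lock);
	/* blocks on task 2 */		/* blocks on task 1 -> deadlock */
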
The algorithm that the TTM graphics subsystem came up with for dealing with
this problem is quite simple. For each group of buffers (execbuf) that needs
to be locked, the caller is assigned a unique reservation id/ticket from a
global counter. In case of deadlock while locking all the buffers associated
with an execbuf, the one with the lowest reservation ticket (i.e. the oldest
task) wins, and the one with the higher reservation id (i.e. the younger
task) unlocks all of the buffers that it has already locked, and then tries
again.

In the RDBMS literature this deadlock handling approach is called wait/wound:
The older task waits until it can acquire the contended lock. The younger task
needs to back off and drop all the locks it is currently holding, i.e. the
younger task is wounded.

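As a rough sketch (the ticket comparison below is illustrative, not the actual
implementation), the wait/wound decision when a task contends on a lock held
by another acquire context boils down to:

if (my_ctx->stamp < holder_ctx->stamp) {
	/* older task: wait until the lock is released */
	wait_for_lock();
} else {
	/* younger task: wounded, back off and retry from scratch */
	return -EDEADLK;
}
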
Concepts
--------

Compared to normal mutexes, two additional concepts/objects show up in the
lock interface for w/w mutexes:

Acquire context: To ensure eventual forward progress it is important that a
task trying to acquire locks doesn't grab a new reservation id, but keeps the
one it acquired when starting the lock acquisition. This ticket is stored in
the acquire context. Furthermore the acquire context keeps track of debugging
state to catch w/w mutex interface abuse.

W/w class: In contrast to normal mutexes, the lock class needs to be explicit
for w/w mutexes, since it is required to initialize the acquire context.

Furthermore there are three different classes of w/w lock acquire functions:

* Normal lock acquisition with a context, using ww_mutex_lock.

* Slowpath lock acquisition on the contending lock, used by the wounded task
  after having dropped all already acquired locks. These functions have the
  _slow postfix.

  From a simple semantics point-of-view the _slow functions are not strictly
  required, since simply calling the normal ww_mutex_lock functions on the
  contending lock (after having dropped all other already acquired locks)
  will work correctly. After all, if no other ww mutex has been acquired yet
  there's no deadlock potential and hence the ww_mutex_lock call will block
  and not prematurely return -EDEADLK. The advantage of the _slow functions
  is in interface safety:
  - ww_mutex_lock has a __must_check int return type, whereas
    ww_mutex_lock_slow has a void return type. Note that since ww mutex code
    needs loops/retries anyway the __must_check doesn't result in spurious
    warnings, even though the very first lock operation can never fail.
  - When full debugging is enabled ww_mutex_lock_slow checks that all acquired
    ww mutexes have been released (preventing deadlocks) and makes sure that
    we block on the contending lock (preventing spinning through the -EDEADLK
    slowpath until the contended lock can be acquired).

* Functions to only acquire a single w/w mutex, which results in the exact
  same semantics as a normal mutex. This is done by calling ww_mutex_lock
  with a NULL context.

  Again this is not strictly required. But often you only want to acquire a
  single lock, in which case it's pointless to set up an acquire context (and
  so better to avoid grabbing a deadlock avoidance ticket).

Of course, all the usual variants for handling wake-ups due to signals are
also provided.

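For instance, a minimal sketch of an interruptible acquisition (the caller
code around the call is made up for illustration):

ret = ww_mutex_lock_interruptible(&obj->lock, ctx);
if (ret == -EINTR) {
	/* a signal arrived: drop all locks acquired so far and bail out */
	...
}
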
Usage
-----

There are three different ways to acquire locks within the same w/w class.
Common definitions for methods #1 and #2:

static DEFINE_WW_CLASS(ww_class);

struct obj {
	struct ww_mutex lock;
	/* obj data */
};

struct obj_entry {
	struct list_head head;
	struct obj *obj;
};

Method 1, using a list in execbuf->buffers that's not allowed to be reordered.
This is useful if a list of required objects is already tracked somewhere.
Furthermore the lock helper can propagate the -EALREADY return code back to
the caller as a signal that an object appears twice on the list. This is
useful if the list is constructed from userspace input and the ABI requires
userspace to not have duplicate entries (e.g. for a gpu commandbuffer
submission ioctl).

int lock_objs(struct list_head *list, struct ww_acquire_ctx *ctx)
{
	struct obj *res_obj = NULL;
	struct obj_entry *contended_entry = NULL;
	struct obj_entry *entry;
	int ret;

	ww_acquire_init(ctx, &ww_class);

retry:
	list_for_each_entry (entry, list, head) {
		if (entry->obj == res_obj) {
			res_obj = NULL;
			continue;
		}
		ret = ww_mutex_lock(&entry->obj->lock, ctx);
		if (ret < 0) {
			contended_entry = entry;
			goto err;
		}
	}

	ww_acquire_done(ctx);
	return 0;

err:
	list_for_each_entry_continue_reverse (entry, list, head)
		ww_mutex_unlock(&entry->obj->lock);

	if (res_obj)
		ww_mutex_unlock(&res_obj->lock);

	if (ret == -EDEADLK) {
		/* we lost out in a seqno race, lock and retry.. */
		ww_mutex_lock_slow(&contended_entry->obj->lock, ctx);
		res_obj = contended_entry->obj;
		goto retry;
	}
	ww_acquire_fini(ctx);

	return ret;
}

Method 2, using a list in execbuf->buffers that can be reordered. The same
duplicate-entry detection semantics using -EALREADY as in method 1 above
apply, but the list reordering allows for a bit more idiomatic code.

int lock_objs(struct list_head *list, struct ww_acquire_ctx *ctx)
{
	struct obj_entry *entry, *entry2;
	int ret;

	ww_acquire_init(ctx, &ww_class);

	list_for_each_entry (entry, list, head) {
		ret = ww_mutex_lock(&entry->obj->lock, ctx);
		if (ret < 0) {
			entry2 = entry;

			list_for_each_entry_continue_reverse (entry2, list, head)
				ww_mutex_unlock(&entry2->obj->lock);

			if (ret != -EDEADLK) {
				ww_acquire_fini(ctx);
				return ret;
			}

			/* we lost out in a seqno race, lock and retry.. */
			ww_mutex_lock_slow(&entry->obj->lock, ctx);

			/*
			 * Move buf to head of the list, this will point
			 * buf->next to the first unlocked entry,
			 * restarting the for loop.
			 */
			list_del(&entry->head);
			list_add(&entry->head, list);
		}
	}

	ww_acquire_done(ctx);
	return 0;
}

Unlocking works the same way for both methods #1 and #2:

void unlock_objs(struct list_head *list, struct ww_acquire_ctx *ctx)
{
	struct obj_entry *entry;

	list_for_each_entry (entry, list, head)
		ww_mutex_unlock(&entry->obj->lock);

	ww_acquire_fini(ctx);
}

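A caller would then typically keep the acquire context on the stack; a minimal
sketch (process_objs and the "operate" step are made up for illustration):

int process_objs(struct list_head *list)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ret = lock_objs(list, &ctx);	/* initializes ctx internally */
	if (ret)
		return ret;

	/* ... operate on the locked objects ... */

	unlock_objs(list, &ctx);	/* also calls ww_acquire_fini */
	return 0;
}
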
Method 3 is useful if the list of objects is constructed ad-hoc and not
upfront, e.g. when adjusting edges in a graph where each node has its own
ww_mutex lock, and edges can only be changed when holding the locks of all
involved nodes. w/w mutexes are a natural fit for such a case for two reasons:
- They can handle lock-acquisition in any order, which allows us to start
  walking a graph from a starting point and then iteratively discovering new
  edges and locking down the nodes those edges connect to.
- Due to the -EALREADY return code signalling that a given object is already
  held there's no need for additional book-keeping to break cycles in the
  graph or keep track of which locks are already held (when using more than
  one node as a starting point).

Note that this approach differs in two important ways from the above methods:
- Since the list of objects is dynamically constructed (and might very well be
  different when retrying due to hitting the -EDEADLK wound condition) there's
  no need to keep any object on a persistent list when it's not locked. We can
  therefore move the list_head into the object itself.
- On the other hand the dynamic object list construction also means that the
  -EALREADY return code can't be propagated.

Note also that methods #1 and #2 can be combined with method #3, e.g. to first
lock a list of starting nodes (passed in from userspace) using one of the
above methods, and then lock any additional objects affected by the operations
using method #3 below. The backoff/retry procedure will be a bit more
involved, since when the dynamic locking step hits -EDEADLK we also need to
unlock all the objects acquired with the fixed list. But the w/w mutex debug
checks will catch any interface misuse for these cases.

Also, method 3 can't fail the lock acquisition step, since it doesn't
propagate -EALREADY to its caller. Of course this would be different when
using the _interruptible variants, but that's outside of the scope of these
examples here.

struct obj {
	struct ww_mutex ww_mutex;
	struct list_head locked_list;
};

static DEFINE_WW_CLASS(ww_class);

void __unlock_objs(struct list_head *list)
{
	struct obj *entry, *temp;

	list_for_each_entry_safe (entry, temp, list, locked_list) {
		/* need to do that before unlocking, since only the current
		 * lock holder is allowed to use the object */
		list_del(&entry->locked_list);
		ww_mutex_unlock(&entry->ww_mutex);
	}
}

void lock_objs(struct list_head *list, struct ww_acquire_ctx *ctx)
{
	struct obj *obj;
	int ret;

	ww_acquire_init(ctx, &ww_class);

retry:
	/* re-init loop start state */
	loop {
		/* magic code which walks over a graph and decides which
		 * objects to lock */

		ret = ww_mutex_lock(&obj->ww_mutex, ctx);
		if (ret == -EALREADY) {
			/* we have that one already, get to the next object */
			continue;
		}
		if (ret == -EDEADLK) {
			__unlock_objs(list);

			ww_mutex_lock_slow(&obj->ww_mutex, ctx);
			list_add(&obj->locked_list, list);
			goto retry;
		}

		/* locked a new object, add it to the list */
		list_add_tail(&obj->locked_list, list);
	}

	ww_acquire_done(ctx);
}

void unlock_objs(struct list_head *list, struct ww_acquire_ctx *ctx)
{
	__unlock_objs(list);
	ww_acquire_fini(ctx);
}

Method 4: Only lock a single object. In that case deadlock detection and
prevention is obviously overkill, since with grabbing just one lock you can't
produce a deadlock within just one class. To simplify this case the w/w mutex
API can be used with a NULL context.

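A minimal sketch of this single-lock case (the surrounding code is
illustrative):

int ret;

ret = ww_mutex_lock(&obj->lock, NULL);	/* always 0 in the NULL-context case */
/* ... use the object ... */
ww_mutex_unlock(&obj->lock);
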
Implementation Details
----------------------

Design:
  ww_mutex currently encapsulates a struct mutex, which means there is no
  extra overhead for normal mutex locks (the far more common case), and only
  a small increase in code size if wait/wound mutexes are not used.

  In general, not much contention is expected. The locks are typically used
  to serialize access to resources for devices. The only way to make wakeups
  smarter would be at the cost of adding a field to struct mutex_waiter. This
  would add overhead to all cases where normal mutexes are used, and
  ww_mutexes are generally less performance sensitive.

Lockdep:
  Special care has been taken to warn for as many cases of API abuse
  as possible. Some common API abuses will be caught with
  CONFIG_DEBUG_MUTEXES, but CONFIG_PROVE_LOCKING is recommended.

  Some of the errors which will be warned about:
   - Forgetting to call ww_acquire_fini or ww_acquire_init.
   - Attempting to lock more mutexes after ww_acquire_done.
   - Attempting to lock the wrong mutex after -EDEADLK and
     unlocking all mutexes.
   - Attempting to lock the right mutex after -EDEADLK,
     before unlocking all mutexes.

   - Calling ww_mutex_lock_slow before -EDEADLK was returned.

   - Unlocking mutexes with the wrong unlock function.
   - Calling one of the ww_acquire_* functions twice on the same context.
   - Using a different ww_class for the mutex than for the ww_acquire_ctx.
   - Normal lockdep errors that can result in deadlocks.

  Some of the lockdep errors that can result in deadlocks:
   - Calling ww_acquire_init to initialize a second ww_acquire_ctx before
     having called ww_acquire_fini on the first.
   - 'normal' deadlocks that can occur.

FIXME: Update this section once we have the TASK_DEADLOCK task state flag
magic implemented.
diff --git a/arch/ia64/include/asm/mutex.h b/arch/ia64/include/asm/mutex.h
index bed73a643a56..f41e66d65e31 100644
--- a/arch/ia64/include/asm/mutex.h
+++ b/arch/ia64/include/asm/mutex.h
@@ -29,17 +29,15 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  * from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns.
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
 	if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
-		return fail_fn(count);
+		return -1;
 	return 0;
 }
 
diff --git a/arch/powerpc/include/asm/mutex.h b/arch/powerpc/include/asm/mutex.h
index 5399f7e18102..127ab23e1f6c 100644
--- a/arch/powerpc/include/asm/mutex.h
+++ b/arch/powerpc/include/asm/mutex.h
@@ -82,17 +82,15 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  * from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns.
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
 	if (unlikely(__mutex_dec_return_lock(count) < 0))
-		return fail_fn(count);
+		return -1;
 	return 0;
 }
 
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 028ac1f71b51..46ac1ddea683 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -97,22 +97,14 @@ static int fsl_indirect_read_config(struct pci_bus *bus, unsigned int devfn,
 	return indirect_read_config(bus, devfn, offset, len, val);
 }
 
-static struct pci_ops fsl_indirect_pci_ops =
+#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
+
+static struct pci_ops fsl_indirect_pcie_ops =
 {
 	.read = fsl_indirect_read_config,
 	.write = indirect_write_config,
 };
 
-static void __init fsl_setup_indirect_pci(struct pci_controller* hose,
-					  resource_size_t cfg_addr,
-					  resource_size_t cfg_data, u32 flags)
-{
-	setup_indirect_pci(hose, cfg_addr, cfg_data, flags);
-	hose->ops = &fsl_indirect_pci_ops;
-}
-
-#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
-
 #define MAX_PHYS_ADDR_BITS	40
 static u64 pci64_dma_offset = 1ull << MAX_PHYS_ADDR_BITS;
 
@@ -504,13 +496,15 @@ int __init fsl_add_bridge(struct platform_device *pdev, int is_primary)
 	if (!hose->private_data)
 		goto no_bridge;
 
-	fsl_setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
-			       PPC_INDIRECT_TYPE_BIG_ENDIAN);
+	setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
+			   PPC_INDIRECT_TYPE_BIG_ENDIAN);
 
 	if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0)
 		hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
 
 	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
+		/* use fsl_indirect_read_config for PCIe */
+		hose->ops = &fsl_indirect_pcie_ops;
 		/* For PCIE read HEADER_TYPE to identify controler mode */
 		early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type);
 		if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
@@ -814,8 +808,8 @@ int __init mpc83xx_add_bridge(struct device_node *dev)
 		if (ret)
 			goto err0;
 	} else {
-		fsl_setup_indirect_pci(hose, rsrc_cfg.start,
-				       rsrc_cfg.start + 4, 0);
+		setup_indirect_pci(hose, rsrc_cfg.start,
+				   rsrc_cfg.start + 4, 0);
 	}
 
 	printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
index 886ac7d4937a..2f8c1abeb086 100644
--- a/arch/s390/include/asm/dma-mapping.h
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -50,9 +50,10 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
+	debug_dma_mapping_error(dev, dma_addr);
 	if (dma_ops->mapping_error)
 		return dma_ops->mapping_error(dev, dma_addr);
-	return (dma_addr == 0UL);
+	return (dma_addr == DMA_ERROR_CODE);
 }
 
 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index d8a6a385d048..feb719d3c851 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -754,9 +754,9 @@ static struct bin_attribute sys_reipl_fcp_scp_data_attr = {
 	.write = reipl_fcp_scpdata_write,
 };
 
-DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n",
+DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%llx\n",
 		   reipl_block_fcp->ipl_info.fcp.wwpn);
-DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%016llx\n",
+DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%llx\n",
 		   reipl_block_fcp->ipl_info.fcp.lun);
 DEFINE_IPL_ATTR_RW(reipl_fcp, bootprog, "%lld\n", "%lld\n",
 		   reipl_block_fcp->ipl_info.fcp.bootprog);
@@ -1323,9 +1323,9 @@ static struct shutdown_action __refdata reipl_action = {
 
 /* FCP dump device attributes */
 
-DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%016llx\n",
+DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%llx\n",
 		   dump_block_fcp->ipl_info.fcp.wwpn);
-DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%016llx\n",
+DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%llx\n",
 		   dump_block_fcp->ipl_info.fcp.lun);
 DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
 		   dump_block_fcp->ipl_info.fcp.bootprog);
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 408e866ae548..dd3c1994b8bd 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -312,6 +312,7 @@ void measurement_alert_subclass_unregister(void)
 }
 EXPORT_SYMBOL(measurement_alert_subclass_unregister);
 
+#ifdef CONFIG_SMP
 void synchronize_irq(unsigned int irq)
 {
 	/*
@@ -320,6 +321,7 @@ void synchronize_irq(unsigned int irq)
 	 */
 }
 EXPORT_SYMBOL_GPL(synchronize_irq);
+#endif
 
 #ifndef CONFIG_PCI
 
diff --git a/arch/s390/mm/mem_detect.c b/arch/s390/mm/mem_detect.c
index 3cbd3b8bf311..cca388253a39 100644
--- a/arch/s390/mm/mem_detect.c
+++ b/arch/s390/mm/mem_detect.c
@@ -123,7 +123,8 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
 			continue;
 		} else if ((addr <= chunk->addr) &&
 			   (addr + size >= chunk->addr + chunk->size)) {
-			memset(chunk, 0 , sizeof(*chunk));
+			memmove(chunk, chunk + 1, (MEMORY_CHUNKS-i-1) * sizeof(*chunk));
+			memset(&mem_chunk[MEMORY_CHUNKS-1], 0, sizeof(*chunk));
 		} else if (addr + size < chunk->addr + chunk->size) {
 			chunk->size = chunk->addr + chunk->size - addr - size;
 			chunk->addr = addr + size;
diff --git a/arch/sh/include/asm/mutex-llsc.h b/arch/sh/include/asm/mutex-llsc.h
index 090358a7e1bb..dad29b687bd3 100644
--- a/arch/sh/include/asm/mutex-llsc.h
+++ b/arch/sh/include/asm/mutex-llsc.h
@@ -37,7 +37,7 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
 }
 
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
 	int __done, __res;
 
@@ -51,7 +51,7 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
 		: "t");
 
 	if (unlikely(!__done || __res != 0))
-		__res = fail_fn(count);
+		__res = -1;
 
 	return __res;
 }
diff --git a/arch/x86/include/asm/mutex_32.h b/arch/x86/include/asm/mutex_32.h
index 03f90c8a5a7c..0208c3c2cbc6 100644
--- a/arch/x86/include/asm/mutex_32.h
+++ b/arch/x86/include/asm/mutex_32.h
@@ -42,17 +42,14 @@ do { \
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  * from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
- * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
-static inline int __mutex_fastpath_lock_retval(atomic_t *count,
-					       int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_lock_retval(atomic_t *count)
 {
 	if (unlikely(atomic_dec_return(count) < 0))
-		return fail_fn(count);
+		return -1;
 	else
 		return 0;
 }
diff --git a/arch/x86/include/asm/mutex_64.h b/arch/x86/include/asm/mutex_64.h
index 68a87b0f8e29..2c543fff241b 100644
--- a/arch/x86/include/asm/mutex_64.h
+++ b/arch/x86/include/asm/mutex_64.h
@@ -37,17 +37,14 @@ do { \
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  * from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
-static inline int __mutex_fastpath_lock_retval(atomic_t *count,
-					       int (*fail_fn)(atomic_t *))
+static inline int __mutex_fastpath_lock_retval(atomic_t *count)
 {
 	if (unlikely(atomic_dec_return(count) < 0))
-		return fail_fn(count);
+		return -1;
 	else
 		return 0;
 }
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index d6cbfe9df218..fa061d46527f 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -137,7 +137,7 @@ static const struct xpad_device {
 	{ 0x0738, 0x4540, "Mad Catz Beat Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
 	{ 0x0738, 0x4556, "Mad Catz Lynx Wireless Controller", 0, XTYPE_XBOX },
 	{ 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
-	{ 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", XTYPE_XBOX360 },
+	{ 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
 	{ 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 62a2c0e4cc99..7ac9c9818d55 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -431,6 +431,7 @@ config KEYBOARD_TEGRA
 
 config KEYBOARD_OPENCORES
 	tristate "OpenCores Keyboard Controller"
+	depends on HAS_IOMEM
 	help
 	  Say Y here if you want to use the OpenCores Keyboard Controller
 	  http://www.opencores.org/project,keyboardcontroller
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index aebfe3ecb945..1bda828f4b55 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -205,6 +205,7 @@ config SERIO_XILINX_XPS_PS2
 
 config SERIO_ALTERA_PS2
 	tristate "Altera UP PS/2 controller"
+	depends on HAS_IOMEM
 	help
 	  Say Y here if you have Altera University Program PS/2 ports.
 
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 518282da6d85..384fbcd0cee0 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -363,6 +363,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
 	case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */
 	case 0x160802: /* Cintiq 13HD Pro Pen */
 	case 0x180802: /* DTH2242 Pen */
+	case 0x100802: /* Intuos4/5 13HD/24HD General Pen */
 		wacom->tool[idx] = BTN_TOOL_PEN;
 		break;
 
@@ -401,6 +402,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
 	case 0x10080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */
 	case 0x16080a: /* Cintiq 13HD Pro Pen Eraser */
 	case 0x18080a: /* DTH2242 Eraser */
+	case 0x10080a: /* Intuos4/5 13HD/24HD General Pen Eraser */
 		wacom->tool[idx] = BTN_TOOL_RUBBER;
 		break;
 
diff --git a/drivers/input/touchscreen/cyttsp_core.c b/drivers/input/touchscreen/cyttsp_core.c
index 8e60437ac85b..ae89d2609ab0 100644
--- a/drivers/input/touchscreen/cyttsp_core.c
+++ b/drivers/input/touchscreen/cyttsp_core.c
@@ -116,6 +116,15 @@ static int ttsp_send_command(struct cyttsp *ts, u8 cmd)
 	return ttsp_write_block_data(ts, CY_REG_BASE, sizeof(cmd), &cmd);
 }
 
+static int cyttsp_handshake(struct cyttsp *ts)
+{
+	if (ts->pdata->use_hndshk)
+		return ttsp_send_command(ts,
+				ts->xy_data.hst_mode ^ CY_HNDSHK_BIT);
+
+	return 0;
+}
+
 static int cyttsp_load_bl_regs(struct cyttsp *ts)
 {
 	memset(&ts->bl_data, 0, sizeof(ts->bl_data));
@@ -133,7 +142,7 @@ static int cyttsp_exit_bl_mode(struct cyttsp *ts)
 	memcpy(bl_cmd, bl_command, sizeof(bl_command));
 	if (ts->pdata->bl_keys)
 		memcpy(&bl_cmd[sizeof(bl_command) - CY_NUM_BL_KEYS],
-			ts->pdata->bl_keys, sizeof(bl_command));
+			ts->pdata->bl_keys, CY_NUM_BL_KEYS);
 
 	error = ttsp_write_block_data(ts, CY_REG_BASE,
 				      sizeof(bl_cmd), bl_cmd);
@@ -167,6 +176,10 @@ static int cyttsp_set_operational_mode(struct cyttsp *ts)
 	if (error)
 		return error;
 
+	error = cyttsp_handshake(ts);
+	if (error)
+		return error;
+
 	return ts->xy_data.act_dist == CY_ACT_DIST_DFLT ? -EIO : 0;
 }
 
@@ -188,6 +201,10 @@ static int cyttsp_set_sysinfo_mode(struct cyttsp *ts)
 	if (error)
 		return error;
 
+	error = cyttsp_handshake(ts);
+	if (error)
+		return error;
+
 	if (!ts->sysinfo_data.tts_verh && !ts->sysinfo_data.tts_verl)
 		return -EIO;
 
@@ -344,12 +361,9 @@ static irqreturn_t cyttsp_irq(int irq, void *handle)
 		goto out;
 
 	/* provide flow control handshake */
-	if (ts->pdata->use_hndshk) {
-		error = ttsp_send_command(ts,
-				ts->xy_data.hst_mode ^ CY_HNDSHK_BIT);
-		if (error)
-			goto out;
-	}
+	error = cyttsp_handshake(ts);
+	if (error)
+		goto out;
 
 	if (unlikely(ts->state == CY_IDLE_STATE))
 		goto out;
diff --git a/drivers/input/touchscreen/cyttsp_core.h b/drivers/input/touchscreen/cyttsp_core.h
index 1aa3c6967e70..f1ebde369f86 100644
--- a/drivers/input/touchscreen/cyttsp_core.h
+++ b/drivers/input/touchscreen/cyttsp_core.h
@@ -67,8 +67,8 @@ struct cyttsp_xydata {
 /* TTSP System Information interface definition */
 struct cyttsp_sysinfo_data {
 	u8 hst_mode;
-	u8 mfg_cmd;
 	u8 mfg_stat;
+	u8 mfg_cmd;
 	u8 cid[3];
 	u8 tt_undef1;
 	u8 uid[8];
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index c735c5a008a2..6427600b5bbe 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -59,7 +59,7 @@ static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
 		int ret;
 
 		sg_free_table(sgt);
-		ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
+		ret = sg_alloc_table(sgt, nents, GFP_ATOMIC);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index f5d84d6f8222..48b396fced0a 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1075,7 +1075,7 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
 	    acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
 		return NULL;
 
-	pdata = devm_kzalloc(&pdev->dev, sizeof(*ssp), GFP_KERNEL);
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata) {
 		dev_err(&pdev->dev,
 			"failed to allocate memory for platform data\n");
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 5000586cb98d..71cc3e6ef47c 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -444,7 +444,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
 	}
 
 	ret = pm_runtime_get_sync(&sdd->pdev->dev);
-	if (ret != 0) {
+	if (ret < 0) {
 		dev_err(dev, "Failed to enable device: %d\n", ret);
 		goto out_tx;
 	}
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index e570081f9f76..35f281033142 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -2470,13 +2470,16 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
 		.mode = mode
 	};
 	int err;
+	bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) ||
+			  (mode & FALLOC_FL_PUNCH_HOLE);
 
 	if (fc->no_fallocate)
 		return -EOPNOTSUPP;
 
-	if (mode & FALLOC_FL_PUNCH_HOLE) {
+	if (lock_inode) {
 		mutex_lock(&inode->i_mutex);
-		fuse_set_nowrite(inode);
+		if (mode & FALLOC_FL_PUNCH_HOLE)
+			fuse_set_nowrite(inode);
 	}
 
 	req = fuse_get_req_nopages(fc);
@@ -2511,8 +2514,9 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
 	fuse_invalidate_attr(inode);
 
 out:
-	if (mode & FALLOC_FL_PUNCH_HOLE) {
-		fuse_release_nowrite(inode);
+	if (lock_inode) {
+		if (mode & FALLOC_FL_PUNCH_HOLE)
+			fuse_release_nowrite(inode);
 		mutex_unlock(&inode->i_mutex);
 	}
 
diff --git a/fs/splice.c b/fs/splice.c
index 9eca476227d5..d37431dd60a1 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1283,6 +1283,7 @@ static int direct_splice_actor(struct pipe_inode_info *pipe,
  * @in:		file to splice from
  * @ppos:	input file offset
  * @out:	file to splice to
+ * @opos:	output file offset
  * @len:	number of bytes to splice
  * @flags:	splice modifier flags
  *
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
index f104af7cf437..d4f9fb4e53df 100644
--- a/include/asm-generic/mutex-dec.h
+++ b/include/asm-generic/mutex-dec.h
@@ -28,17 +28,15 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  * from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if
- * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns.
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
 	if (unlikely(atomic_dec_return(count) < 0))
-		return fail_fn(count);
+		return -1;
 	return 0;
 }
 
diff --git a/include/asm-generic/mutex-null.h b/include/asm-generic/mutex-null.h
index e1bbbc72b6a2..61069ed334e2 100644
--- a/include/asm-generic/mutex-null.h
+++ b/include/asm-generic/mutex-null.h
@@ -11,7 +11,7 @@
 #define _ASM_GENERIC_MUTEX_NULL_H
 
 #define __mutex_fastpath_lock(count, fail_fn)		fail_fn(count)
-#define __mutex_fastpath_lock_retval(count, fail_fn)	fail_fn(count)
+#define __mutex_fastpath_lock_retval(count)		(-1)
 #define __mutex_fastpath_unlock(count, fail_fn)	fail_fn(count)
 #define __mutex_fastpath_trylock(count, fail_fn)	fail_fn(count)
 #define __mutex_slowpath_needs_to_unlock()		1
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index c04e0db8a2d6..f169ec064785 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -39,18 +39,16 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
  * from 1 to a 0 value
  * @count: pointer of type atomic_t
- * @fail_fn: function to call if the original value was not 1
  *
- * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
- * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
- * or anything the slow path function returns
+ * Change the count from 1 to a value lower than 1. This function returns 0
+ * if the fastpath succeeds, or -1 otherwise.
  */
 static inline int
-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+__mutex_fastpath_lock_retval(atomic_t *count)
 {
 	if (unlikely(atomic_xchg(count, 0) != 1))
 		if (likely(atomic_xchg(count, -1) != 1))
-			return fail_fn(count);
+			return -1;
 	return 0;
 }
 
diff --git a/include/linux/mutex-debug.h b/include/linux/mutex-debug.h
index 731d77d6e155..4ac8b1977b73 100644
--- a/include/linux/mutex-debug.h
+++ b/include/linux/mutex-debug.h
@@ -3,6 +3,7 @@
 
 #include <linux/linkage.h>
 #include <linux/lockdep.h>
+#include <linux/debug_locks.h>
 
 /*
  * Mutexes - debugging helpers:
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 433da8a1a426..3793ed7feeeb 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -10,6 +10,7 @@
 #ifndef __LINUX_MUTEX_H
 #define __LINUX_MUTEX_H
 
+#include <asm/current.h>
 #include <linux/list.h>
 #include <linux/spinlock_types.h>
 #include <linux/linkage.h>
@@ -77,6 +78,40 @@ struct mutex_waiter {
 #endif
 };
 
+struct ww_class {
+	atomic_long_t stamp;
+	struct lock_class_key acquire_key;
+	struct lock_class_key mutex_key;
+	const char *acquire_name;
+	const char *mutex_name;
+};
+
+struct ww_acquire_ctx {
+	struct task_struct *task;
+	unsigned long stamp;
+	unsigned acquired;
+#ifdef CONFIG_DEBUG_MUTEXES
+	unsigned done_acquire;
+	struct ww_class *ww_class;
+	struct ww_mutex *contending_lock;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+	unsigned deadlock_inject_interval;
+	unsigned deadlock_inject_countdown;
+#endif
+};
+
+struct ww_mutex {
+	struct mutex base;
+	struct ww_acquire_ctx *ctx;
+#ifdef CONFIG_DEBUG_MUTEXES
+	struct ww_class *ww_class;
+#endif
+};
+
 #ifdef CONFIG_DEBUG_MUTEXES
 # include <linux/mutex-debug.h>
 #else
@@ -101,8 +136,11 @@ static inline void mutex_destroy(struct mutex *lock) {}
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
 		, .dep_map = { .name = #lockname }
+# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class) \
+		, .ww_class = &ww_class
 #else
 # define __DEP_MAP_MUTEX_INITIALIZER(lockname)
+# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class)
 #endif
 
 #define __MUTEX_INITIALIZER(lockname) \
@@ -112,13 +150,49 @@ static inline void mutex_destroy(struct mutex *lock) {}
 		__DEBUG_MUTEX_INITIALIZER(lockname) \
 		__DEP_MAP_MUTEX_INITIALIZER(lockname) }
 
+#define __WW_CLASS_INITIALIZER(ww_class) \
+		{ .stamp = ATOMIC_LONG_INIT(0) \
+		, .acquire_name = #ww_class "_acquire" \
+		, .mutex_name = #ww_class "_mutex" }
+
+#define __WW_MUTEX_INITIALIZER(lockname, class) \
+		{ .base = { \__MUTEX_INITIALIZER(lockname) } \
+		__WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
+
 #define DEFINE_MUTEX(mutexname) \
 	struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
 
+#define DEFINE_WW_CLASS(classname) \
+	struct ww_class classname = __WW_CLASS_INITIALIZER(classname)
+
+#define DEFINE_WW_MUTEX(mutexname, ww_class) \
+	struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class)
+
+
 extern void __mutex_init(struct mutex *lock, const char *name,
 			 struct lock_class_key *key);
 
 /**
+ * ww_mutex_init - initialize the w/w mutex
+ * @lock: the mutex to be initialized
+ * @ww_class: the w/w class the mutex should belong to
+ *
+ * Initialize the w/w mutex to unlocked state and associate it with the given
+ * class.
+ *
+ * It is not allowed to initialize an already locked mutex.
+ */
+static inline void ww_mutex_init(struct ww_mutex *lock,
+				 struct ww_class *ww_class)
+{
+	__mutex_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key);
+	lock->ctx = NULL;
+#ifdef CONFIG_DEBUG_MUTEXES
+	lock->ww_class = ww_class;
+#endif
+}
+
+/**
  * mutex_is_locked - is the mutex locked
  * @lock: the mutex to be queried
  *
@@ -136,6 +210,7 @@ static inline int mutex_is_locked(struct mutex *lock)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
 extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
+
 extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
 					unsigned int subclass);
 extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
@@ -147,7 +222,7 @@ extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
 
 #define mutex_lock_nest_lock(lock, nest_lock)				\
 do {									\
-	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
+	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);		\
 	_mutex_lock_nest_lock(lock, &(nest_lock)->dep_map);		\
 } while (0)
 
@@ -170,6 +245,292 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
170 */ 245 */
171extern int mutex_trylock(struct mutex *lock); 246extern int mutex_trylock(struct mutex *lock);
172extern void mutex_unlock(struct mutex *lock); 247extern void mutex_unlock(struct mutex *lock);
248
249/**
250 * ww_acquire_init - initialize a w/w acquire context
251 * @ctx: w/w acquire context to initialize
252 * @ww_class: w/w class of the context
253 *
254 * Initializes an context to acquire multiple mutexes of the given w/w class.
255 *
256 * Context-based w/w mutex acquiring can be done in any order whatsoever within
257 * a given lock class. Deadlocks will be detected and handled with the
258 * wait/wound logic.
259 *
260 * Mixing of context-based w/w mutex acquiring and single w/w mutex locking can
261 * result in undetected deadlocks and is so forbidden. Mixing different contexts
262 * for the same w/w class when acquiring mutexes can also result in undetected
263 * deadlocks, and is hence also forbidden. Both types of abuse will be caught by
264 * enabling CONFIG_PROVE_LOCKING.
265 *
266 * Nesting of acquire contexts for _different_ w/w classes is possible, subject
267 * to the usual locking rules between different lock classes.
268 *
269 * An acquire context must be released with ww_acquire_fini by the same task
270 * before the memory is freed. It is recommended to allocate the context itself
271 * on the stack.
272 */
273static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
274 struct ww_class *ww_class)
275{
276 ctx->task = current;
277 ctx->stamp = atomic_long_inc_return(&ww_class->stamp);
278 ctx->acquired = 0;
279#ifdef CONFIG_DEBUG_MUTEXES
280 ctx->ww_class = ww_class;
281 ctx->done_acquire = 0;
282 ctx->contending_lock = NULL;
283#endif
284#ifdef CONFIG_DEBUG_LOCK_ALLOC
285 debug_check_no_locks_freed((void *)ctx, sizeof(*ctx));
286 lockdep_init_map(&ctx->dep_map, ww_class->acquire_name,
287 &ww_class->acquire_key, 0);
288 mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_);
289#endif
290#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
291 ctx->deadlock_inject_interval = 1;
292 ctx->deadlock_inject_countdown = ctx->stamp & 0xf;
293#endif
294}
295
296/**
297 * ww_acquire_done - marks the end of the acquire phase
298 * @ctx: the acquire context
299 *
300 * Marks the end of the acquire phase, any further w/w mutex lock calls using
301 * this context are forbidden.
302 *
303 * Calling this function is optional, it is just useful to document w/w mutex
304 * code and clearly designated the acquire phase from actually using the locked
305 * data structures.
306 */
307static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
308{
309#ifdef CONFIG_DEBUG_MUTEXES
310 lockdep_assert_held(ctx);
311
312 DEBUG_LOCKS_WARN_ON(ctx->done_acquire);
313 ctx->done_acquire = 1;
314#endif
315}
316
317/**
318 * ww_acquire_fini - releases a w/w acquire context
319 * @ctx: the acquire context to free
320 *
321 * Releases a w/w acquire context. This must be called _after_ all acquired w/w
322 * mutexes have been released with ww_mutex_unlock.
323 */
324static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
325{
326#ifdef CONFIG_DEBUG_MUTEXES
327 mutex_release(&ctx->dep_map, 0, _THIS_IP_);
328
329 DEBUG_LOCKS_WARN_ON(ctx->acquired);
330 if (!config_enabled(CONFIG_PROVE_LOCKING))
331 /*
332 * lockdep will normally handle this,
333 * but fail without anyway
334 */
335 ctx->done_acquire = 1;
336
337 if (!config_enabled(CONFIG_DEBUG_LOCK_ALLOC))
338 /* ensure ww_acquire_fini will still fail if called twice */
339 ctx->acquired = ~0U;
340#endif
341}
342
343extern int __must_check __ww_mutex_lock(struct ww_mutex *lock,
344 struct ww_acquire_ctx *ctx);
345extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock,
346 struct ww_acquire_ctx *ctx);
347
348/**
349 * ww_mutex_lock - acquire the w/w mutex
350 * @lock: the mutex to be acquired
351 * @ctx: w/w acquire context, or NULL to acquire only a single lock.
352 *
353 * Lock the w/w mutex exclusively for this task.
354 *
355 * Deadlocks within a given w/w class of locks are detected and handled with the
356 * wait/wound algorithm. If the lock isn't immediately avaiable this function
357 * will either sleep until it is (wait case). Or it selects the current context
358 * for backing off by returning -EDEADLK (wound case). Trying to acquire the
359 * same lock with the same context twice is also detected and signalled by
360 * returning -EALREADY. Returns 0 if the mutex was successfully acquired.
361 *
362 * In the wound case the caller must release all currently held w/w mutexes for
363 * the given context and then wait for this contending lock to be available by
364 * calling ww_mutex_lock_slow. Alternatively callers can opt to not acquire this
365 * lock and proceed with trying to acquire further w/w mutexes (e.g. when
366 * scanning through lru lists trying to free resources).
367 *
368 * The mutex must later on be released by the same task that
369 * acquired it. The task may not exit without first unlocking the mutex. Also,
370 * kernel memory where the mutex resides must not be freed with the mutex still
371 * locked. The mutex must first be initialized (or statically defined) before it
372 * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
373 * of the same w/w lock class as was used to initialize the acquire context.
374 *
375 * A mutex acquired with this function must be released with ww_mutex_unlock.
376 */
377static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
378{
379 if (ctx)
380 return __ww_mutex_lock(lock, ctx);
381 else {
382 mutex_lock(&lock->base);
383 return 0;
384 }
385}
386
387/**
388 * ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible
389 * @lock: the mutex to be acquired
390 * @ctx: w/w acquire context
391 *
392 * Lock the w/w mutex exclusively for this task.
393 *
394 * Deadlocks within a given w/w class of locks are detected and handled with the
395 * wait/wound algorithm. If the lock isn't immediately avaiable this function
396 * will either sleep until it is (wait case). Or it selects the current context
397 * for backing off by returning -EDEADLK (wound case). Trying to acquire the
398 * same lock with the same context twice is also detected and signalled by
399 * returning -EALREADY. Returns 0 if the mutex was successfully acquired. If a
400 * signal arrives while waiting for the lock then this function returns -EINTR.
401 *
402 * In the wound case the caller must release all currently held w/w mutexes for
403 * the given context and then wait for this contending lock to be available by
404 * calling ww_mutex_lock_slow_interruptible. Alternatively callers can opt to
405 * not acquire this lock and proceed with trying to acquire further w/w mutexes
406 * (e.g. when scanning through lru lists trying to free resources).
407 *
408 * The mutex must later be released by the same task that
409 * acquired it. The task may not exit without first unlocking the mutex. Also,
410 * kernel memory where the mutex resides must not be freed with the mutex still
411 * locked. The mutex must first be initialized (or statically defined) before it
412 * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
413 * of the same w/w lock class as was used to initialize the acquire context.
414 *
415 * A mutex acquired with this function must be released with ww_mutex_unlock.
416 */
417static inline int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
418 struct ww_acquire_ctx *ctx)
419{
420 if (ctx)
421 return __ww_mutex_lock_interruptible(lock, ctx);
422 else
423 return mutex_lock_interruptible(&lock->base);
424}
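A sketch of the additional -EINTR case (hypothetical caller, for illustration only):

	static int demo_lock_intr(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
	{
		int ret = ww_mutex_lock_interruptible(lock, ctx);

		if (ret == -EINTR)	/* a signal arrived while sleeping */
			return ret;	/* unwind held locks and propagate */
		return ret;		/* otherwise 0, -EDEADLK or -EALREADY */
	}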
425
426/**
427 * ww_mutex_lock_slow - slowpath acquiring of the w/w mutex
428 * @lock: the mutex to be acquired
429 * @ctx: w/w acquire context
430 *
431 * Acquires a w/w mutex with the given context after a wound case. This function
432 * will sleep until the lock becomes available.
433 *
434 * The caller must have released all w/w mutexes already acquired with the
435 * context and then call this function on the contended lock.
436 *
437 * Afterwards the caller may continue to (re)acquire the other w/w mutexes it
438 * needs with ww_mutex_lock. Note that the -EALREADY return code from
439 * ww_mutex_lock can be used to avoid locking this contended mutex twice.
440 *
441 * It is forbidden to call this function with any other w/w mutexes associated
442 * with the context held. It is forbidden to call this on anything other than
443 * the contending mutex.
444 *
445 * Note that the slowpath lock acquisition can also be done by calling
446 * ww_mutex_lock directly. This function exists simply to improve w/w mutex
447 * locking code readability by clearly denoting the slowpath.
448 */
449static inline void
450ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
451{
452 int ret;
453#ifdef CONFIG_DEBUG_MUTEXES
454 DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
455#endif
456 ret = ww_mutex_lock(lock, ctx);
457 (void)ret;
458}
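Putting it together, the backoff pattern looks roughly like this (a sketch with the same hypothetical names as above; real users keep looping until no -EDEADLK remains):

	static void demo_lock_both(struct ww_mutex *a, struct ww_mutex *b,
				   struct ww_acquire_ctx *ctx)
	{
		int ret;

		ret = ww_mutex_lock(a, ctx);
		if (!ret) {
			ret = ww_mutex_lock(b, ctx);
			if (!ret)
				return;			/* both locks held */
			ww_mutex_unlock(a);		/* wounded: release everything */
		}
		if (ret == -EDEADLK) {
			ww_mutex_lock_slow(b, ctx);	/* sleep on the contended lock */
			ret = ww_mutex_lock(a, ctx);	/* reacquire the rest; this may */
			(void)ret;			/* wound again, hence the loop */
		}
	}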
459
460/**
461 * ww_mutex_lock_slow_interruptible - slowpath acquiring of the w/w mutex,
462 * interruptible
463 * @lock: the mutex to be acquired
464 * @ctx: w/w acquire context
465 *
466 * Acquires a w/w mutex with the given context after a wound case. This function
467 * will sleep until the lock becomes available and returns 0 when the lock has
468 * been acquired. If a signal arrives while waiting for the lock then this
469 * function returns -EINTR.
470 *
471 * The caller must have released all w/w mutexes already acquired with the
472 * context and then call this function on the contended lock.
473 *
474 * Afterwards the caller may continue to (re)acquire the other w/w mutexes it
475 * needs with ww_mutex_lock. Note that the -EALREADY return code from
476 * ww_mutex_lock can be used to avoid locking this contended mutex twice.
477 *
478 * It is forbidden to call this function with any other w/w mutexes associated
479 * with the given context held. It is forbidden to call this on anything other
480 * than the contending mutex.
481 *
482 * Note that the slowpath lock acquisition can also be done by calling
483 * ww_mutex_lock_interruptible directly. This function exists simply to improve
484 * w/w mutex locking code readability by clearly denoting the slowpath.
485 */
486static inline int __must_check
487ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
488 struct ww_acquire_ctx *ctx)
489{
490#ifdef CONFIG_DEBUG_MUTEXES
491 DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
492#endif
493 return ww_mutex_lock_interruptible(lock, ctx);
494}
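A sketch of the interruptible backoff step (hypothetical caller; nothing is held when the contended lock is taken slowly, so aborting is cheap):

	static int demo_backoff_intr(struct ww_mutex *contended,
				     struct ww_acquire_ctx *ctx)
	{
		int ret = ww_mutex_lock_slow_interruptible(contended, ctx);

		if (ret)			/* -EINTR: abort the whole sequence */
			ww_acquire_fini(ctx);	/* legal: no mutexes are held here */
		return ret;
	}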
495
496extern void ww_mutex_unlock(struct ww_mutex *lock);
497
498/**
499 * ww_mutex_trylock - tries to acquire the w/w mutex without acquire context
500 * @lock: mutex to lock
501 *
502 * Trylocks a mutex without acquire context, so no deadlock detection is
503 * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
504 */
505static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock)
506{
507 return mutex_trylock(&lock->base);
508}
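For illustration, the kind of opportunistic scan this is meant for (a sketch; demo_grab_any is hypothetical):

	static struct ww_mutex *demo_grab_any(struct ww_mutex **locks, int n)
	{
		int i;

		for (i = 0; i < n; i++)
			if (ww_mutex_trylock(locks[i]))
				return locks[i];	/* caller must ww_mutex_unlock() it */
		return NULL;				/* everything was contended */
	}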
509
510/**
511 * ww_mutex_destroy - mark a w/w mutex unusable
512 * @lock: the mutex to be destroyed
513 *
514 * This function marks the mutex uninitialized, and any subsequent
515 * use of the mutex is forbidden. The mutex must not be locked when
516 * this function is called.
517 */
518static inline void ww_mutex_destroy(struct ww_mutex *lock)
519{
520 mutex_destroy(&lock->base);
521}
522
523/**
524 * ww_mutex_is_locked - is the w/w mutex locked
525 * @lock: the mutex to be queried
526 *
527 * Returns 1 if the mutex is locked, 0 if unlocked.
528 */
529static inline bool ww_mutex_is_locked(struct ww_mutex *lock)
530{
531 return mutex_is_locked(&lock->base);
532}
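E.g. usable as a cheap sanity assertion (an illustrative sketch, not part of this patch):

	static void demo_assert_held(struct ww_mutex *lock)
	{
		WARN_ON(!ww_mutex_is_locked(lock));
	}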
533
173 534 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
174 535 
175 536 #ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
diff --git a/kernel/mutex.c b/kernel/mutex.c
index ad53a664f113..e581ada5faf4 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -254,16 +254,165 @@ void __sched mutex_unlock(struct mutex *lock)
254 254
255 255 EXPORT_SYMBOL(mutex_unlock);
256 256
257/**
258 * ww_mutex_unlock - release the w/w mutex
259 * @lock: the mutex to be released
260 *
261 * Unlock a mutex that has been locked by this task previously with any of the
262 * ww_mutex_lock* functions (with or without an acquire context). It is
263 * forbidden to release the locks after releasing the acquire context.
264 *
265 * This function must not be used in interrupt context. Unlocking
266 * of an unlocked mutex is not allowed.
267 */
268void __sched ww_mutex_unlock(struct ww_mutex *lock)
269{
270 /*
271 * The unlocking fastpath is the 0->1 transition from 'locked'
272 * into 'unlocked' state:
273 */
274 if (lock->ctx) {
275#ifdef CONFIG_DEBUG_MUTEXES
276 DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
277#endif
278 if (lock->ctx->acquired > 0)
279 lock->ctx->acquired--;
280 lock->ctx = NULL;
281 }
282
283#ifndef CONFIG_DEBUG_MUTEXES
284 /*
285 * When debugging is enabled we must not clear the owner before time,
286 * the slow path will always be taken, and that clears the owner field
287 * after verifying that it was indeed current.
288 */
289 mutex_clear_owner(&lock->base);
290#endif
291 __mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
292}
293EXPORT_SYMBOL(ww_mutex_unlock);
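A sketch of the required pairing (hypothetical names); the embedded base mutex must never be unlocked directly:

	static void demo_use(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
	{
		if (ww_mutex_lock(lock, ctx))
			return;
		/* ... */
		ww_mutex_unlock(lock);	/* never mutex_unlock(&lock->base) */
	}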
294
295static inline int __sched
296__mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
297{
298 struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
299 struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
300
301 if (!hold_ctx)
302 return 0;
303
304 if (unlikely(ctx == hold_ctx))
305 return -EALREADY;
306
307 if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
308 (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
309#ifdef CONFIG_DEBUG_MUTEXES
310 DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
311 ctx->contending_lock = ww;
312#endif
313 return -EDEADLK;
314 }
315
316 return 0;
317}
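The stamp test above is wraparound-safe ticket arithmetic: a context is younger when its stamp was handed out later, even after the counter wraps, with the context pointer breaking ties. A sketch of the comparison in isolation (demo_stamp_younger is hypothetical):

	/* sketch: true when stamp 'a' was issued after stamp 'b' */
	static bool demo_stamp_younger(unsigned long a, unsigned long b)
	{
		return a != b && a - b <= LONG_MAX;
	}
	/* e.g. demo_stamp_younger(1, ULONG_MAX) is true: 1 - ULONG_MAX == 2 */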
318
319static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
320 struct ww_acquire_ctx *ww_ctx)
321{
322#ifdef CONFIG_DEBUG_MUTEXES
323 /*
324 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
325 * but released with a normal mutex_unlock in this call.
326 *
327 * This should never happen, always use ww_mutex_unlock.
328 */
329 DEBUG_LOCKS_WARN_ON(ww->ctx);
330
331 /*
332 * Still acquiring locks after calling ww_acquire_done()?
333 */
334 DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
335
336 if (ww_ctx->contending_lock) {
337 /*
338 * After -EDEADLK you tried to
339 * acquire a different ww_mutex? Bad!
340 */
341 DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
342
343 /*
344 * You called ww_mutex_lock after receiving -EDEADLK,
345 * but 'forgot' to unlock everything else first?
346 */
347 DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
348 ww_ctx->contending_lock = NULL;
349 }
350
351 /*
352 * Naughty, using a different class will lead to undefined behavior!
353 */
354 DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
355#endif
356 ww_ctx->acquired++;
357}
358
359/*
360 * After acquiring the lock with the fastpath, or after losing out in the
361 * contested slowpath, set ctx and wake up any waiters so they can recheck.
362 *
363 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
364 * as the fastpath and opportunistic spinning are disabled in that case.
365 */
366static __always_inline void
367ww_mutex_set_context_fastpath(struct ww_mutex *lock,
368 struct ww_acquire_ctx *ctx)
369{
370 unsigned long flags;
371 struct mutex_waiter *cur;
372
373 ww_mutex_lock_acquired(lock, ctx);
374
375 lock->ctx = ctx;
376
377 /*
378 * The lock->ctx update should be visible on all cores before
379 * the atomic read is done, otherwise contended waiters might be
380 * missed. A contended waiter will either see ww_ctx == NULL
381 * and keep spinning, or acquire wait_lock, add itself
382 * to the waiter list and sleep.
383 */
384 smp_mb(); /* ^^^ */
385
386 /*
387 * Check if lock is contended, if not there is nobody to wake up
388 */
389 if (likely(atomic_read(&lock->base.count) == 0))
390 return;
391
392 /*
393 * Uh oh, we raced in fastpath, wake up everyone in this case,
394 * so they can see the new lock->ctx.
395 */
396 spin_lock_mutex(&lock->base.wait_lock, flags);
397 list_for_each_entry(cur, &lock->base.wait_list, list) {
398 debug_mutex_wake_waiter(&lock->base, cur);
399 wake_up_process(cur->task);
400 }
401 spin_unlock_mutex(&lock->base.wait_lock, flags);
402}
403
257 404 /*
258 405  * Lock a mutex (possibly interruptible), slowpath:
259 406  */
260 static inline int __sched
407 static __always_inline int __sched
261 408 __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
262 		    struct lockdep_map *nest_lock, unsigned long ip)
409 		    struct lockdep_map *nest_lock, unsigned long ip,
410 		    struct ww_acquire_ctx *ww_ctx)
263 411 {
264 412 	struct task_struct *task = current;
265 413 	struct mutex_waiter waiter;
266 414 	unsigned long flags;
415 	int ret;
267 416 
268 417 	preempt_disable();
269 418 	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
@@ -298,6 +447,22 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
298 447 		struct task_struct *owner;
299 448 		struct mspin_node node;
300 449
450 if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
451 struct ww_mutex *ww;
452
453 ww = container_of(lock, struct ww_mutex, base);
454 /*
455 * If ww->ctx is set its contents are undefined; only
456 * acquiring wait_lock guarantees that they are valid
457 * when read.
458 *
459 * As such, when deadlock detection needs to be
460 * performed, optimistic spinning cannot be used.
461 */
462 if (ACCESS_ONCE(ww->ctx))
463 break;
464 }
465
301 466 		/*
302 467 		 * If there's an owner, wait for it to either
303 468 		 * release the lock or go to sleep.
@@ -312,6 +477,13 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
312 477 		if ((atomic_read(&lock->count) == 1) &&
313 478 		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
314 479 			lock_acquired(&lock->dep_map, ip);
480 if (!__builtin_constant_p(ww_ctx == NULL)) {
481 struct ww_mutex *ww;
482 ww = container_of(lock, struct ww_mutex, base);
483
484 ww_mutex_set_context_fastpath(ww, ww_ctx);
485 }
486
315 487 			mutex_set_owner(lock);
316 488 			mspin_unlock(MLOCK(lock), &node);
317 489 			preempt_enable();
@@ -371,15 +543,16 @@ slowpath:
371 543 		 * TASK_UNINTERRUPTIBLE case.)
372 544 		 */
373 545 		if (unlikely(signal_pending_state(state, task))) {
374 			mutex_remove_waiter(lock, &waiter,
375 					    task_thread_info(task));
376 			mutex_release(&lock->dep_map, 1, ip);
377 			spin_unlock_mutex(&lock->wait_lock, flags);
378 
379 			debug_mutex_free_waiter(&waiter);
380 			preempt_enable();
381 			return -EINTR;
546 			ret = -EINTR;
547 			goto err;
382 548 		}
549 
550 		if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
551 			ret = __mutex_lock_check_stamp(lock, ww_ctx);
552 			if (ret)
553 				goto err;
554 		}
555 
383 556 		__set_task_state(task, state);
384 557 
385 558 		/* didn't get the lock, go to sleep: */
@@ -394,6 +567,30 @@ done:
394 567 	mutex_remove_waiter(lock, &waiter, current_thread_info());
395 568 	mutex_set_owner(lock);
396 569
570 if (!__builtin_constant_p(ww_ctx == NULL)) {
571 struct ww_mutex *ww = container_of(lock,
572 struct ww_mutex,
573 base);
574 struct mutex_waiter *cur;
575
576 /*
577 * This branch gets optimized out for the common case,
578 * and is only important for ww_mutex_lock.
579 */
580
581 ww_mutex_lock_acquired(ww, ww_ctx);
582 ww->ctx = ww_ctx;
583
584 /*
585 * Give any possible sleeping processes the chance to wake up,
586 * so they can recheck if they have to back off.
587 */
588 list_for_each_entry(cur, &lock->wait_list, list) {
589 debug_mutex_wake_waiter(lock, cur);
590 wake_up_process(cur->task);
591 }
592 }
593
397 594 	/* set it to 0 if there are no waiters left: */
398 595 	if (likely(list_empty(&lock->wait_list)))
399 596 		atomic_set(&lock->count, 0);
@@ -404,6 +601,14 @@ done:
404 601 	preempt_enable();
405 602 
406 603 	return 0;
604
605err:
606 mutex_remove_waiter(lock, &waiter, task_thread_info(task));
607 spin_unlock_mutex(&lock->wait_lock, flags);
608 debug_mutex_free_waiter(&waiter);
609 mutex_release(&lock->dep_map, 1, ip);
610 preempt_enable();
611 return ret;
407 612 }
408 613 
409 614 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -411,7 +616,8 @@ void __sched
411 616 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
412 617 {
413 618 	might_sleep();
414 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
619 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
620 			    subclass, NULL, _RET_IP_, NULL);
415 621 }
416 622 
417 623 EXPORT_SYMBOL_GPL(mutex_lock_nested);
@@ -420,7 +626,8 @@ void __sched
420 626 _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
421 627 {
422 628 	might_sleep();
423 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
629 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
630 			    0, nest, _RET_IP_, NULL);
424 631 }
425 632 
426 633 EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
@@ -429,7 +636,8 @@ int __sched
429 636 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
430 637 {
431 638 	might_sleep();
432 	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
639 	return __mutex_lock_common(lock, TASK_KILLABLE,
640 				   subclass, NULL, _RET_IP_, NULL);
433 641 }
434 642 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
435 643
@@ -438,10 +646,68 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
438 646 {
439 647 	might_sleep();
440 648 	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
441 				   subclass, NULL, _RET_IP_);
649 				   subclass, NULL, _RET_IP_, NULL);
442 650 }
443 651 
444 652 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
653
654static inline int
655ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
656{
657#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
658 unsigned tmp;
659
660 if (ctx->deadlock_inject_countdown-- == 0) {
661 tmp = ctx->deadlock_inject_interval;
662 if (tmp > UINT_MAX/4)
663 tmp = UINT_MAX;
664 else
665 tmp = tmp*2 + tmp + tmp/2;
666
667 ctx->deadlock_inject_interval = tmp;
668 ctx->deadlock_inject_countdown = tmp;
669 ctx->contending_lock = lock;
670
671 ww_mutex_unlock(lock);
672
673 return -EDEADLK;
674 }
675#endif
676
677 return 0;
678}
679
680int __sched
681__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
682{
683 int ret;
684
685 might_sleep();
686 ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
687 0, &ctx->dep_map, _RET_IP_, ctx);
688 if (!ret && ctx->acquired > 0)
689 return ww_mutex_deadlock_injection(lock, ctx);
690
691 return ret;
692}
693EXPORT_SYMBOL_GPL(__ww_mutex_lock);
694
695int __sched
696__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
697{
698 int ret;
699
700 might_sleep();
701 ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
702 0, &ctx->dep_map, _RET_IP_, ctx);
703
704 if (!ret && ctx->acquired > 0)
705 return ww_mutex_deadlock_injection(lock, ctx);
706
707 return ret;
708}
709EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
710
445 711 #endif
446 712 
447 713 /*
@@ -494,10 +760,10 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
494 760  * mutex_lock_interruptible() and mutex_trylock().
495 761  */
496 762 static noinline int __sched
497 __mutex_lock_killable_slowpath(atomic_t *lock_count);
763 __mutex_lock_killable_slowpath(struct mutex *lock);
498 764 
499 765 static noinline int __sched
500 __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
766 __mutex_lock_interruptible_slowpath(struct mutex *lock);
501 767 
502 768 /**
503 769  * mutex_lock_interruptible - acquire the mutex, interruptible
@@ -515,12 +781,12 @@ int __sched mutex_lock_interruptible(struct mutex *lock)
515 781 	int ret;
516 782 
517 783 	might_sleep();
518 	ret = __mutex_fastpath_lock_retval
519 		(&lock->count, __mutex_lock_interruptible_slowpath);
520 	if (!ret)
784 	ret = __mutex_fastpath_lock_retval(&lock->count);
785 	if (likely(!ret)) {
521 786 		mutex_set_owner(lock);
522 
523 	return ret;
787 		return 0;
788 	} else
789 		return __mutex_lock_interruptible_slowpath(lock);
524 790 }
525 791
526 792 EXPORT_SYMBOL(mutex_lock_interruptible);
@@ -530,12 +796,12 @@ int __sched mutex_lock_killable(struct mutex *lock)
530 796 	int ret;
531 797 
532 798 	might_sleep();
533 	ret = __mutex_fastpath_lock_retval
534 		(&lock->count, __mutex_lock_killable_slowpath);
535 	if (!ret)
799 	ret = __mutex_fastpath_lock_retval(&lock->count);
800 	if (likely(!ret)) {
536 801 		mutex_set_owner(lock);
537 
538 	return ret;
802 		return 0;
803 	} else
804 		return __mutex_lock_killable_slowpath(lock);
539 805 }
540 806 EXPORT_SYMBOL(mutex_lock_killable);
541 807
@@ -544,24 +810,39 @@ __mutex_lock_slowpath(atomic_t *lock_count)
544 810 {
545 811 	struct mutex *lock = container_of(lock_count, struct mutex, count);
546 812 
547 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
813 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
814 			    NULL, _RET_IP_, NULL);
548 815 }
549 816 
550 817 static noinline int __sched
551 __mutex_lock_killable_slowpath(atomic_t *lock_count)
818 __mutex_lock_killable_slowpath(struct mutex *lock)
552 819 {
553 	struct mutex *lock = container_of(lock_count, struct mutex, count);
554 
555 	return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
820 	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
821 				   NULL, _RET_IP_, NULL);
822 }
823 
824 static noinline int __sched
825 __mutex_lock_interruptible_slowpath(struct mutex *lock)
826 {
827 	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
828 				   NULL, _RET_IP_, NULL);
556 829 }
557 830 
558 831 static noinline int __sched
559 __mutex_lock_interruptible_slowpath(atomic_t *lock_count)
832 __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
560 833 {
561 	struct mutex *lock = container_of(lock_count, struct mutex, count);
834 	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
835 				   NULL, _RET_IP_, ctx);
836 }
562 837 
563 	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
838 static noinline int __sched
839 __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
840 				       struct ww_acquire_ctx *ctx)
841 {
842 	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
843 				   NULL, _RET_IP_, ctx);
564 844 }
845 
565 846 #endif
566 847
567 848 /*
@@ -617,6 +898,45 @@ int __sched mutex_trylock(struct mutex *lock)
617 898 }
618 899 EXPORT_SYMBOL(mutex_trylock);
619 900
901#ifndef CONFIG_DEBUG_LOCK_ALLOC
902int __sched
903__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
904{
905 int ret;
906
907 might_sleep();
908
909 ret = __mutex_fastpath_lock_retval(&lock->base.count);
910
911 if (likely(!ret)) {
912 ww_mutex_set_context_fastpath(lock, ctx);
913 mutex_set_owner(&lock->base);
914 } else
915 ret = __ww_mutex_lock_slowpath(lock, ctx);
916 return ret;
917}
918EXPORT_SYMBOL(__ww_mutex_lock);
919
920int __sched
921__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
922{
923 int ret;
924
925 might_sleep();
926
927 ret = __mutex_fastpath_lock_retval(&lock->base.count);
928
929 if (likely(!ret)) {
930 ww_mutex_set_context_fastpath(lock, ctx);
931 mutex_set_owner(&lock->base);
932 } else
933 ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
934 return ret;
935}
936EXPORT_SYMBOL(__ww_mutex_lock_interruptible);
937
938#endif
939
620 940 /**
621 941  * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
622 942  * @cnt: the atomic which we are to dec
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 566cf2bc08ea..7154f799541a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -547,6 +547,19 @@ config DEBUG_MUTEXES
547 547 	  This feature allows mutex semantics violations to be detected and
548 548 	  reported.
549 549
550config DEBUG_WW_MUTEX_SLOWPATH
551 bool "Wait/wound mutex debugging: Slowpath testing"
552 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
553 select DEBUG_LOCK_ALLOC
554 select DEBUG_SPINLOCK
555 select DEBUG_MUTEXES
556 help
557 This feature enables slowpath testing for w/w mutex users by
558 injecting additional -EDEADLK wound/backoff cases. Together with
559 	  the full mutex checks enabled with CONFIG_PROVE_LOCKING, this
560 will test all possible w/w mutex interface abuse with the
561 exception of simply not acquiring all the required locks.
562
550 563 config DEBUG_LOCK_ALLOC
551 564 	bool "Lock debugging: detect incorrect freeing of live locks"
552 565 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index f2fa60c59343..96c4c633d95e 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -30,6 +30,7 @@ EXPORT_SYMBOL_GPL(debug_locks);
30 30  * a locking bug is detected.
31 31  */
32 32 int debug_locks_silent;
33EXPORT_SYMBOL_GPL(debug_locks_silent);
33 34
34 35 /*
35 36  * Generic 'turn off all lock debugging' function:
@@ -44,3 +45,4 @@ int debug_locks_off(void)
44 45 	}
45 46 	return 0;
46 47 }
48EXPORT_SYMBOL_GPL(debug_locks_off);
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index c3eb261a7df3..aad024dde3c4 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -26,6 +26,8 @@
26 26  */
27 27 static unsigned int debug_locks_verbose;
28 28
29static DEFINE_WW_CLASS(ww_lockdep);
30
29 31 static int __init setup_debug_locks_verbose(char *str)
30 32 {
31 33 	get_option(&str, &debug_locks_verbose);
@@ -42,6 +44,10 @@ __setup("debug_locks_verbose=", setup_debug_locks_verbose);
42 44 #define LOCKTYPE_RWLOCK 0x2
43 45 #define LOCKTYPE_MUTEX 0x4
44 46 #define LOCKTYPE_RWSEM 0x8
47#define LOCKTYPE_WW 0x10
48
49static struct ww_acquire_ctx t, t2;
50static struct ww_mutex o, o2, o3;
45 51
46 52 /*
47 53  * Normal standalone locks, for the circular and irq-context
@@ -193,6 +199,20 @@ static void init_shared_classes(void)
193 199 #define RSU(x) up_read(&rwsem_##x)
194 200 #define RWSI(x) init_rwsem(&rwsem_##x)
195 201
202#ifndef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
203#define WWAI(x) ww_acquire_init(x, &ww_lockdep)
204#else
205#define WWAI(x) do { ww_acquire_init(x, &ww_lockdep); (x)->deadlock_inject_countdown = ~0U; } while (0)
206#endif
207#define WWAD(x) ww_acquire_done(x)
208#define WWAF(x) ww_acquire_fini(x)
209
210#define WWL(x, c) ww_mutex_lock(x, c)
211#define WWT(x) ww_mutex_trylock(x)
212#define WWL1(x) ww_mutex_lock(x, NULL)
213#define WWU(x) ww_mutex_unlock(x)
214
215
196 216 #define LOCK_UNLOCK_2(x,y) LOCK(x); LOCK(y); UNLOCK(y); UNLOCK(x)
197 217 
198 218 /*
@@ -894,11 +914,13 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
894 914 # define I_RWLOCK(x) lockdep_reset_lock(&rwlock_##x.dep_map)
895 915 # define I_MUTEX(x) lockdep_reset_lock(&mutex_##x.dep_map)
896 916 # define I_RWSEM(x) lockdep_reset_lock(&rwsem_##x.dep_map)
917# define I_WW(x) lockdep_reset_lock(&x.dep_map)
897 918 #else
898 919 # define I_SPINLOCK(x)
899 920 # define I_RWLOCK(x)
900 921 # define I_MUTEX(x)
901 922 # define I_RWSEM(x)
923# define I_WW(x)
902 924 #endif
903 925
904 926 #define I1(x) \
@@ -920,11 +942,20 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
920 942 static void reset_locks(void)
921 943 {
922 944 	local_irq_disable();
945 lockdep_free_key_range(&ww_lockdep.acquire_key, 1);
946 lockdep_free_key_range(&ww_lockdep.mutex_key, 1);
947
923 948 	I1(A); I1(B); I1(C); I1(D);
924 949 	I1(X1); I1(X2); I1(Y1); I1(Y2); I1(Z1); I1(Z2);
950 I_WW(t); I_WW(t2); I_WW(o.base); I_WW(o2.base); I_WW(o3.base);
925 951 	lockdep_reset();
926 952 	I2(A); I2(B); I2(C); I2(D);
927 953 	init_shared_classes();
954
955 ww_mutex_init(&o, &ww_lockdep); ww_mutex_init(&o2, &ww_lockdep); ww_mutex_init(&o3, &ww_lockdep);
956 memset(&t, 0, sizeof(t)); memset(&t2, 0, sizeof(t2));
957 memset(&ww_lockdep.acquire_key, 0, sizeof(ww_lockdep.acquire_key));
958 memset(&ww_lockdep.mutex_key, 0, sizeof(ww_lockdep.mutex_key));
928 959 	local_irq_enable();
929 960 }
930 961
@@ -938,7 +969,6 @@ static int unexpected_testcase_failures;
938 969 static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
939 970 {
940 971 	unsigned long saved_preempt_count = preempt_count();
941 	int expected_failure = 0;
942 972 
943 973 	WARN_ON(irqs_disabled());
944 974
@@ -947,25 +977,17 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
947 977 	 * Filter out expected failures:
948 978 	 */
949 979 #ifndef CONFIG_PROVE_LOCKING
950 	if ((lockclass_mask & LOCKTYPE_SPIN) && debug_locks != expected)
951 		expected_failure = 1;
952 	if ((lockclass_mask & LOCKTYPE_RWLOCK) && debug_locks != expected)
953 		expected_failure = 1;
954 	if ((lockclass_mask & LOCKTYPE_MUTEX) && debug_locks != expected)
955 		expected_failure = 1;
956 	if ((lockclass_mask & LOCKTYPE_RWSEM) && debug_locks != expected)
957 		expected_failure = 1;
980 	if (expected == FAILURE && debug_locks) {
981 		expected_testcase_failures++;
982 		printk("failed|");
983 	}
984 	else
958 985 #endif
959 986 	if (debug_locks != expected) {
960 		if (expected_failure) {
961 			expected_testcase_failures++;
962 			printk("failed|");
963 		} else {
964 			unexpected_testcase_failures++;
965 
966 			printk("FAILED|");
967 			dump_stack();
968 		}
987 		unexpected_testcase_failures++;
988 		printk("FAILED|");
989 
990 		dump_stack();
969 991 	} else {
970 992 		testcase_successes++;
971 993 		printk(" ok |");
@@ -1108,6 +1130,666 @@ static inline void print_testname(const char *testname)
1108 1130 	DO_TESTCASE_6IRW(desc, name, 312); \
1109 1131 	DO_TESTCASE_6IRW(desc, name, 321);
1110 1132
1133static void ww_test_fail_acquire(void)
1134{
1135 int ret;
1136
1137 WWAI(&t);
1138 t.stamp++;
1139
1140 ret = WWL(&o, &t);
1141
1142 if (WARN_ON(!o.ctx) ||
1143 WARN_ON(ret))
1144 return;
1145
1146 /* No lockdep test, pure API */
1147 ret = WWL(&o, &t);
1148 WARN_ON(ret != -EALREADY);
1149
1150 ret = WWT(&o);
1151 WARN_ON(ret);
1152
1153 t2 = t;
1154 t2.stamp++;
1155 ret = WWL(&o, &t2);
1156 WARN_ON(ret != -EDEADLK);
1157 WWU(&o);
1158
1159 if (WWT(&o))
1160 WWU(&o);
1161#ifdef CONFIG_DEBUG_LOCK_ALLOC
1162 else
1163 DEBUG_LOCKS_WARN_ON(1);
1164#endif
1165}
1166
1167static void ww_test_normal(void)
1168{
1169 int ret;
1170
1171 WWAI(&t);
1172
1173 /*
1174 * None of the ww_mutex codepaths should be taken in the 'normal'
1175 * mutex calls. The easiest way to verify this is by using the
1176 * normal mutex calls, and making sure o.ctx is unmodified.
1177 */
1178
1179 /* mutex_lock (and indirectly, mutex_lock_nested) */
1180 o.ctx = (void *)~0UL;
1181 mutex_lock(&o.base);
1182 mutex_unlock(&o.base);
1183 WARN_ON(o.ctx != (void *)~0UL);
1184
1185 /* mutex_lock_interruptible (and *_nested) */
1186 o.ctx = (void *)~0UL;
1187 ret = mutex_lock_interruptible(&o.base);
1188 if (!ret)
1189 mutex_unlock(&o.base);
1190 else
1191 WARN_ON(1);
1192 WARN_ON(o.ctx != (void *)~0UL);
1193
1194 /* mutex_lock_killable (and *_nested) */
1195 o.ctx = (void *)~0UL;
1196 ret = mutex_lock_killable(&o.base);
1197 if (!ret)
1198 mutex_unlock(&o.base);
1199 else
1200 WARN_ON(1);
1201 WARN_ON(o.ctx != (void *)~0UL);
1202
1203 /* trylock, succeeding */
1204 o.ctx = (void *)~0UL;
1205 ret = mutex_trylock(&o.base);
1206 WARN_ON(!ret);
1207 if (ret)
1208 mutex_unlock(&o.base);
1209 else
1210 WARN_ON(1);
1211 WARN_ON(o.ctx != (void *)~0UL);
1212
1213 /* trylock, failing */
1214 o.ctx = (void *)~0UL;
1215 mutex_lock(&o.base);
1216 ret = mutex_trylock(&o.base);
1217 WARN_ON(ret);
1218 mutex_unlock(&o.base);
1219 WARN_ON(o.ctx != (void *)~0UL);
1220
1221 /* nest_lock */
1222 o.ctx = (void *)~0UL;
1223 mutex_lock_nest_lock(&o.base, &t);
1224 mutex_unlock(&o.base);
1225 WARN_ON(o.ctx != (void *)~0UL);
1226}
1227
1228static void ww_test_two_contexts(void)
1229{
1230 WWAI(&t);
1231 WWAI(&t2);
1232}
1233
1234static void ww_test_diff_class(void)
1235{
1236 WWAI(&t);
1237#ifdef CONFIG_DEBUG_MUTEXES
1238 t.ww_class = NULL;
1239#endif
1240 WWL(&o, &t);
1241}
1242
1243static void ww_test_context_done_twice(void)
1244{
1245 WWAI(&t);
1246 WWAD(&t);
1247 WWAD(&t);
1248 WWAF(&t);
1249}
1250
1251static void ww_test_context_unlock_twice(void)
1252{
1253 WWAI(&t);
1254 WWAD(&t);
1255 WWAF(&t);
1256 WWAF(&t);
1257}
1258
1259static void ww_test_context_fini_early(void)
1260{
1261 WWAI(&t);
1262 WWL(&o, &t);
1263 WWAD(&t);
1264 WWAF(&t);
1265}
1266
1267static void ww_test_context_lock_after_done(void)
1268{
1269 WWAI(&t);
1270 WWAD(&t);
1271 WWL(&o, &t);
1272}
1273
1274static void ww_test_object_unlock_twice(void)
1275{
1276 WWL1(&o);
1277 WWU(&o);
1278 WWU(&o);
1279}
1280
1281static void ww_test_object_lock_unbalanced(void)
1282{
1283 WWAI(&t);
1284 WWL(&o, &t);
1285 t.acquired = 0;
1286 WWU(&o);
1287 WWAF(&t);
1288}
1289
1290static void ww_test_object_lock_stale_context(void)
1291{
1292 WWAI(&t);
1293 o.ctx = &t2;
1294 WWL(&o, &t);
1295}
1296
1297static void ww_test_edeadlk_normal(void)
1298{
1299 int ret;
1300
1301 mutex_lock(&o2.base);
1302 o2.ctx = &t2;
1303 mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
1304
1305 WWAI(&t);
1306 t2 = t;
1307 t2.stamp--;
1308
1309 ret = WWL(&o, &t);
1310 WARN_ON(ret);
1311
1312 ret = WWL(&o2, &t);
1313 WARN_ON(ret != -EDEADLK);
1314
1315 o2.ctx = NULL;
1316 mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
1317 mutex_unlock(&o2.base);
1318 WWU(&o);
1319
1320 WWL(&o2, &t);
1321}
1322
1323static void ww_test_edeadlk_normal_slow(void)
1324{
1325 int ret;
1326
1327 mutex_lock(&o2.base);
1328 mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
1329 o2.ctx = &t2;
1330
1331 WWAI(&t);
1332 t2 = t;
1333 t2.stamp--;
1334
1335 ret = WWL(&o, &t);
1336 WARN_ON(ret);
1337
1338 ret = WWL(&o2, &t);
1339 WARN_ON(ret != -EDEADLK);
1340
1341 o2.ctx = NULL;
1342 mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
1343 mutex_unlock(&o2.base);
1344 WWU(&o);
1345
1346 ww_mutex_lock_slow(&o2, &t);
1347}
1348
1349static void ww_test_edeadlk_no_unlock(void)
1350{
1351 int ret;
1352
1353 mutex_lock(&o2.base);
1354 o2.ctx = &t2;
1355 mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
1356
1357 WWAI(&t);
1358 t2 = t;
1359 t2.stamp--;
1360
1361 ret = WWL(&o, &t);
1362 WARN_ON(ret);
1363
1364 ret = WWL(&o2, &t);
1365 WARN_ON(ret != -EDEADLK);
1366
1367 o2.ctx = NULL;
1368 mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
1369 mutex_unlock(&o2.base);
1370
1371 WWL(&o2, &t);
1372}
1373
1374static void ww_test_edeadlk_no_unlock_slow(void)
1375{
1376 int ret;
1377
1378 mutex_lock(&o2.base);
1379 mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
1380 o2.ctx = &t2;
1381
1382 WWAI(&t);
1383 t2 = t;
1384 t2.stamp--;
1385
1386 ret = WWL(&o, &t);
1387 WARN_ON(ret);
1388
1389 ret = WWL(&o2, &t);
1390 WARN_ON(ret != -EDEADLK);
1391
1392 o2.ctx = NULL;
1393 mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
1394 mutex_unlock(&o2.base);
1395
1396 ww_mutex_lock_slow(&o2, &t);
1397}
1398
1399static void ww_test_edeadlk_acquire_more(void)
1400{
1401 int ret;
1402
1403 mutex_lock(&o2.base);
1404 mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
1405 o2.ctx = &t2;
1406
1407 WWAI(&t);
1408 t2 = t;
1409 t2.stamp--;
1410
1411 ret = WWL(&o, &t);
1412 WARN_ON(ret);
1413
1414 ret = WWL(&o2, &t);
1415 WARN_ON(ret != -EDEADLK);
1416
1417 ret = WWL(&o3, &t);
1418}
1419
1420static void ww_test_edeadlk_acquire_more_slow(void)
1421{
1422 int ret;
1423
1424 mutex_lock(&o2.base);
1425 mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
1426 o2.ctx = &t2;
1427
1428 WWAI(&t);
1429 t2 = t;
1430 t2.stamp--;
1431
1432 ret = WWL(&o, &t);
1433 WARN_ON(ret);
1434
1435 ret = WWL(&o2, &t);
1436 WARN_ON(ret != -EDEADLK);
1437
1438 ww_mutex_lock_slow(&o3, &t);
1439}
1440
1441static void ww_test_edeadlk_acquire_more_edeadlk(void)
1442{
1443 int ret;
1444
1445 mutex_lock(&o2.base);
1446 mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
1447 o2.ctx = &t2;
1448
1449 mutex_lock(&o3.base);
1450 mutex_release(&o3.base.dep_map, 1, _THIS_IP_);
1451 o3.ctx = &t2;
1452
1453 WWAI(&t);
1454 t2 = t;
1455 t2.stamp--;
1456
1457 ret = WWL(&o, &t);
1458 WARN_ON(ret);
1459
1460 ret = WWL(&o2, &t);
1461 WARN_ON(ret != -EDEADLK);
1462
1463 ret = WWL(&o3, &t);
1464 WARN_ON(ret != -EDEADLK);
1465}
1466
1467static void ww_test_edeadlk_acquire_more_edeadlk_slow(void)
1468{
1469 int ret;
1470
1471 mutex_lock(&o2.base);
1472 mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
1473 o2.ctx = &t2;
1474
1475 mutex_lock(&o3.base);
1476 mutex_release(&o3.base.dep_map, 1, _THIS_IP_);
1477 o3.ctx = &t2;
1478
1479 WWAI(&t);
1480 t2 = t;
1481 t2.stamp--;
1482
1483 ret = WWL(&o, &t);
1484 WARN_ON(ret);
1485
1486 ret = WWL(&o2, &t);
1487 WARN_ON(ret != -EDEADLK);
1488
1489 ww_mutex_lock_slow(&o3, &t);
1490}
1491
1492static void ww_test_edeadlk_acquire_wrong(void)
1493{
1494 int ret;
1495
1496 mutex_lock(&o2.base);
1497 mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
1498 o2.ctx = &t2;
1499
1500 WWAI(&t);
1501 t2 = t;
1502 t2.stamp--;
1503
1504 ret = WWL(&o, &t);
1505 WARN_ON(ret);
1506
1507 ret = WWL(&o2, &t);
1508 WARN_ON(ret != -EDEADLK);
1509 if (!ret)
1510 WWU(&o2);
1511
1512 WWU(&o);
1513
1514 ret = WWL(&o3, &t);
1515}
1516
1517static void ww_test_edeadlk_acquire_wrong_slow(void)
1518{
1519 int ret;
1520
1521 mutex_lock(&o2.base);
1522 mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
1523 o2.ctx = &t2;
1524
1525 WWAI(&t);
1526 t2 = t;
1527 t2.stamp--;
1528
1529 ret = WWL(&o, &t);
1530 WARN_ON(ret);
1531
1532 ret = WWL(&o2, &t);
1533 WARN_ON(ret != -EDEADLK);
1534 if (!ret)
1535 WWU(&o2);
1536
1537 WWU(&o);
1538
1539 ww_mutex_lock_slow(&o3, &t);
1540}
1541
1542static void ww_test_spin_nest_unlocked(void)
1543{
1544 raw_spin_lock_nest_lock(&lock_A, &o.base);
1545 U(A);
1546}
1547
1548static void ww_test_unneeded_slow(void)
1549{
1550 WWAI(&t);
1551
1552 ww_mutex_lock_slow(&o, &t);
1553}
1554
1555static void ww_test_context_block(void)
1556{
1557 int ret;
1558
1559 WWAI(&t);
1560
1561 ret = WWL(&o, &t);
1562 WARN_ON(ret);
1563 WWL1(&o2);
1564}
1565
1566static void ww_test_context_try(void)
1567{
1568 int ret;
1569
1570 WWAI(&t);
1571
1572 ret = WWL(&o, &t);
1573 WARN_ON(ret);
1574
1575 ret = WWT(&o2);
1576 WARN_ON(!ret);
1577 WWU(&o2);
1578 WWU(&o);
1579}
1580
1581static void ww_test_context_context(void)
1582{
1583 int ret;
1584
1585 WWAI(&t);
1586
1587 ret = WWL(&o, &t);
1588 WARN_ON(ret);
1589
1590 ret = WWL(&o2, &t);
1591 WARN_ON(ret);
1592
1593 WWU(&o2);
1594 WWU(&o);
1595}
1596
1597static void ww_test_try_block(void)
1598{
1599 bool ret;
1600
1601 ret = WWT(&o);
1602 WARN_ON(!ret);
1603
1604 WWL1(&o2);
1605 WWU(&o2);
1606 WWU(&o);
1607}
1608
1609static void ww_test_try_try(void)
1610{
1611 bool ret;
1612
1613 ret = WWT(&o);
1614 WARN_ON(!ret);
1615 ret = WWT(&o2);
1616 WARN_ON(!ret);
1617 WWU(&o2);
1618 WWU(&o);
1619}
1620
1621static void ww_test_try_context(void)
1622{
1623 int ret;
1624
1625 ret = WWT(&o);
1626 WARN_ON(!ret);
1627
1628 WWAI(&t);
1629
1630 ret = WWL(&o2, &t);
1631 WARN_ON(ret);
1632}
1633
1634static void ww_test_block_block(void)
1635{
1636 WWL1(&o);
1637 WWL1(&o2);
1638}
1639
1640static void ww_test_block_try(void)
1641{
1642 bool ret;
1643
1644 WWL1(&o);
1645 ret = WWT(&o2);
1646 WARN_ON(!ret);
1647}
1648
1649static void ww_test_block_context(void)
1650{
1651 int ret;
1652
1653 WWL1(&o);
1654 WWAI(&t);
1655
1656 ret = WWL(&o2, &t);
1657 WARN_ON(ret);
1658}
1659
1660static void ww_test_spin_block(void)
1661{
1662 L(A);
1663 U(A);
1664
1665 WWL1(&o);
1666 L(A);
1667 U(A);
1668 WWU(&o);
1669
1670 L(A);
1671 WWL1(&o);
1672 WWU(&o);
1673 U(A);
1674}
1675
1676static void ww_test_spin_try(void)
1677{
1678 bool ret;
1679
1680 L(A);
1681 U(A);
1682
1683 ret = WWT(&o);
1684 WARN_ON(!ret);
1685 L(A);
1686 U(A);
1687 WWU(&o);
1688
1689 L(A);
1690 ret = WWT(&o);
1691 WARN_ON(!ret);
1692 WWU(&o);
1693 U(A);
1694}
1695
1696static void ww_test_spin_context(void)
1697{
1698 int ret;
1699
1700 L(A);
1701 U(A);
1702
1703 WWAI(&t);
1704
1705 ret = WWL(&o, &t);
1706 WARN_ON(ret);
1707 L(A);
1708 U(A);
1709 WWU(&o);
1710
1711 L(A);
1712 ret = WWL(&o, &t);
1713 WARN_ON(ret);
1714 WWU(&o);
1715 U(A);
1716}
1717
1718static void ww_tests(void)
1719{
1720 printk(" --------------------------------------------------------------------------\n");
1721 printk(" | Wound/wait tests |\n");
1722 printk(" ---------------------\n");
1723
1724 print_testname("ww api failures");
1725 dotest(ww_test_fail_acquire, SUCCESS, LOCKTYPE_WW);
1726 dotest(ww_test_normal, SUCCESS, LOCKTYPE_WW);
1727 dotest(ww_test_unneeded_slow, FAILURE, LOCKTYPE_WW);
1728 printk("\n");
1729
1730 print_testname("ww contexts mixing");
1731 dotest(ww_test_two_contexts, FAILURE, LOCKTYPE_WW);
1732 dotest(ww_test_diff_class, FAILURE, LOCKTYPE_WW);
1733 printk("\n");
1734
1735 print_testname("finishing ww context");
1736 dotest(ww_test_context_done_twice, FAILURE, LOCKTYPE_WW);
1737 dotest(ww_test_context_unlock_twice, FAILURE, LOCKTYPE_WW);
1738 dotest(ww_test_context_fini_early, FAILURE, LOCKTYPE_WW);
1739 dotest(ww_test_context_lock_after_done, FAILURE, LOCKTYPE_WW);
1740 printk("\n");
1741
1742 print_testname("locking mismatches");
1743 dotest(ww_test_object_unlock_twice, FAILURE, LOCKTYPE_WW);
1744 dotest(ww_test_object_lock_unbalanced, FAILURE, LOCKTYPE_WW);
1745 dotest(ww_test_object_lock_stale_context, FAILURE, LOCKTYPE_WW);
1746 printk("\n");
1747
1748 print_testname("EDEADLK handling");
1749 dotest(ww_test_edeadlk_normal, SUCCESS, LOCKTYPE_WW);
1750 dotest(ww_test_edeadlk_normal_slow, SUCCESS, LOCKTYPE_WW);
1751 dotest(ww_test_edeadlk_no_unlock, FAILURE, LOCKTYPE_WW);
1752 dotest(ww_test_edeadlk_no_unlock_slow, FAILURE, LOCKTYPE_WW);
1753 dotest(ww_test_edeadlk_acquire_more, FAILURE, LOCKTYPE_WW);
1754 dotest(ww_test_edeadlk_acquire_more_slow, FAILURE, LOCKTYPE_WW);
1755 dotest(ww_test_edeadlk_acquire_more_edeadlk, FAILURE, LOCKTYPE_WW);
1756 dotest(ww_test_edeadlk_acquire_more_edeadlk_slow, FAILURE, LOCKTYPE_WW);
1757 dotest(ww_test_edeadlk_acquire_wrong, FAILURE, LOCKTYPE_WW);
1758 dotest(ww_test_edeadlk_acquire_wrong_slow, FAILURE, LOCKTYPE_WW);
1759 printk("\n");
1760
1761 print_testname("spinlock nest unlocked");
1762 dotest(ww_test_spin_nest_unlocked, FAILURE, LOCKTYPE_WW);
1763 printk("\n");
1764
1765 printk(" -----------------------------------------------------\n");
1766 printk(" |block | try |context|\n");
1767 printk(" -----------------------------------------------------\n");
1768
1769 print_testname("context");
1770 dotest(ww_test_context_block, FAILURE, LOCKTYPE_WW);
1771 dotest(ww_test_context_try, SUCCESS, LOCKTYPE_WW);
1772 dotest(ww_test_context_context, SUCCESS, LOCKTYPE_WW);
1773 printk("\n");
1774
1775 print_testname("try");
1776 dotest(ww_test_try_block, FAILURE, LOCKTYPE_WW);
1777 dotest(ww_test_try_try, SUCCESS, LOCKTYPE_WW);
1778 dotest(ww_test_try_context, FAILURE, LOCKTYPE_WW);
1779 printk("\n");
1780
1781 print_testname("block");
1782 dotest(ww_test_block_block, FAILURE, LOCKTYPE_WW);
1783 dotest(ww_test_block_try, SUCCESS, LOCKTYPE_WW);
1784 dotest(ww_test_block_context, FAILURE, LOCKTYPE_WW);
1785 printk("\n");
1786
1787 print_testname("spinlock");
1788 dotest(ww_test_spin_block, FAILURE, LOCKTYPE_WW);
1789 dotest(ww_test_spin_try, SUCCESS, LOCKTYPE_WW);
1790 dotest(ww_test_spin_context, FAILURE, LOCKTYPE_WW);
1791 printk("\n");
1792}
1111 1793
1112 1794 void locking_selftest(void)
1113 1795 {
@@ -1188,6 +1870,8 @@ void locking_selftest(void)
1188 1870 	DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion);
1189 1871 //	DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2);
1190 1872
1873 ww_tests();
1874
1191 1875 	if (unexpected_testcase_failures) {
1192 1876 		printk("-----------------------------------------------------------------\n");
1193 1877 		debug_locks = 0;