aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--arch/alpha/include/asm/mutex.h9
-rw-r--r--arch/arc/include/asm/mutex.h18
-rw-r--r--arch/arm/include/asm/mutex.h21
-rw-r--r--arch/arm64/include/asm/Kbuild1
-rw-r--r--arch/avr32/include/asm/mutex.h9
-rw-r--r--arch/blackfin/include/asm/Kbuild1
-rw-r--r--arch/c6x/include/asm/mutex.h6
-rw-r--r--arch/cris/include/asm/mutex.h9
-rw-r--r--arch/frv/include/asm/mutex.h9
-rw-r--r--arch/h8300/include/asm/mutex.h9
-rw-r--r--arch/hexagon/include/asm/mutex.h8
-rw-r--r--arch/ia64/include/asm/mutex.h90
-rw-r--r--arch/m32r/include/asm/mutex.h9
-rw-r--r--arch/m68k/include/asm/Kbuild1
-rw-r--r--arch/metag/include/asm/Kbuild1
-rw-r--r--arch/microblaze/include/asm/mutex.h1
-rw-r--r--arch/mips/include/asm/Kbuild1
-rw-r--r--arch/mn10300/include/asm/mutex.h16
-rw-r--r--arch/nios2/include/asm/mutex.h1
-rw-r--r--arch/openrisc/include/asm/mutex.h27
-rw-r--r--arch/parisc/include/asm/Kbuild1
-rw-r--r--arch/powerpc/include/asm/mutex.h132
-rw-r--r--arch/s390/include/asm/mutex.h9
-rw-r--r--arch/score/include/asm/mutex.h6
-rw-r--r--arch/sh/include/asm/mutex-llsc.h109
-rw-r--r--arch/sh/include/asm/mutex.h12
-rw-r--r--arch/sparc/include/asm/Kbuild1
-rw-r--r--arch/tile/include/asm/Kbuild1
-rw-r--r--arch/um/include/asm/Kbuild1
-rw-r--r--arch/unicore32/include/asm/mutex.h20
-rw-r--r--arch/x86/include/asm/mutex.h5
-rw-r--r--arch/x86/include/asm/mutex_32.h110
-rw-r--r--arch/x86/include/asm/mutex_64.h127
-rw-r--r--arch/xtensa/include/asm/mutex.h9
-rw-r--r--include/asm-generic/mutex-dec.h88
-rw-r--r--include/asm-generic/mutex-null.h19
-rw-r--r--include/asm-generic/mutex-xchg.h120
-rw-r--r--include/asm-generic/mutex.h9
38 files changed, 0 insertions, 1026 deletions
diff --git a/arch/alpha/include/asm/mutex.h b/arch/alpha/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/arch/alpha/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
1/*
2 * Pull in the generic implementation for the mutex fastpath.
3 *
4 * TODO: implement optimized primitives instead, or leave the generic
5 * implementation in place, or pick the atomic_xchg() based generic
6 * implementation. (see asm-generic/mutex-xchg.h for details)
7 */
8
9#include <asm-generic/mutex-dec.h>
diff --git a/arch/arc/include/asm/mutex.h b/arch/arc/include/asm/mutex.h
deleted file mode 100644
index a2f88ff9f506..000000000000
--- a/arch/arc/include/asm/mutex.h
+++ /dev/null
@@ -1,18 +0,0 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9/*
10 * xchg() based mutex fast path maintains a state of 0 or 1, as opposed to
11 * atomic dec based which can "count" any number of lock contenders.
12 * This ideally needs to be fixed in core, but for now switching to dec ver.
13 */
14#if defined(CONFIG_SMP) && (CONFIG_NR_CPUS > 2)
15#include <asm-generic/mutex-dec.h>
16#else
17#include <asm-generic/mutex-xchg.h>
18#endif
diff --git a/arch/arm/include/asm/mutex.h b/arch/arm/include/asm/mutex.h
deleted file mode 100644
index 87c044910fe0..000000000000
--- a/arch/arm/include/asm/mutex.h
+++ /dev/null
@@ -1,21 +0,0 @@
1/*
2 * arch/arm/include/asm/mutex.h
3 *
4 * ARM optimized mutex locking primitives
5 *
6 * Please look into asm-generic/mutex-xchg.h for a formal definition.
7 */
8#ifndef _ASM_MUTEX_H
9#define _ASM_MUTEX_H
10/*
11 * On pre-ARMv6 hardware this results in a swp-based implementation,
12 * which is the most efficient. For ARMv6+, we have exclusive memory
13 * accessors and use atomic_dec to avoid the extra xchg operations
14 * on the locking slowpaths.
15 */
16#if __LINUX_ARM_ARCH__ < 6
17#include <asm-generic/mutex-xchg.h>
18#else
19#include <asm-generic/mutex-dec.h>
20#endif
21#endif /* _ASM_MUTEX_H */
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 44e1d7f10add..b4ab238a59ec 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -24,7 +24,6 @@ generic-y += mm-arch-hooks.h
24generic-y += mman.h 24generic-y += mman.h
25generic-y += msgbuf.h 25generic-y += msgbuf.h
26generic-y += msi.h 26generic-y += msi.h
27generic-y += mutex.h
28generic-y += poll.h 27generic-y += poll.h
29generic-y += preempt.h 28generic-y += preempt.h
30generic-y += resource.h 29generic-y += resource.h
diff --git a/arch/avr32/include/asm/mutex.h b/arch/avr32/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/arch/avr32/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
1/*
2 * Pull in the generic implementation for the mutex fastpath.
3 *
4 * TODO: implement optimized primitives instead, or leave the generic
5 * implementation in place, or pick the atomic_xchg() based generic
6 * implementation. (see asm-generic/mutex-xchg.h for details)
7 */
8
9#include <asm-generic/mutex-dec.h>
diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild
index 91d49c0a3118..2fb67b59d188 100644
--- a/arch/blackfin/include/asm/Kbuild
+++ b/arch/blackfin/include/asm/Kbuild
@@ -24,7 +24,6 @@ generic-y += mcs_spinlock.h
24generic-y += mm-arch-hooks.h 24generic-y += mm-arch-hooks.h
25generic-y += mman.h 25generic-y += mman.h
26generic-y += msgbuf.h 26generic-y += msgbuf.h
27generic-y += mutex.h
28generic-y += param.h 27generic-y += param.h
29generic-y += percpu.h 28generic-y += percpu.h
30generic-y += pgalloc.h 29generic-y += pgalloc.h
diff --git a/arch/c6x/include/asm/mutex.h b/arch/c6x/include/asm/mutex.h
deleted file mode 100644
index 7a7248e0462d..000000000000
--- a/arch/c6x/include/asm/mutex.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _ASM_C6X_MUTEX_H
2#define _ASM_C6X_MUTEX_H
3
4#include <asm-generic/mutex-null.h>
5
6#endif /* _ASM_C6X_MUTEX_H */
diff --git a/arch/cris/include/asm/mutex.h b/arch/cris/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/arch/cris/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
1/*
2 * Pull in the generic implementation for the mutex fastpath.
3 *
4 * TODO: implement optimized primitives instead, or leave the generic
5 * implementation in place, or pick the atomic_xchg() based generic
6 * implementation. (see asm-generic/mutex-xchg.h for details)
7 */
8
9#include <asm-generic/mutex-dec.h>
diff --git a/arch/frv/include/asm/mutex.h b/arch/frv/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/arch/frv/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
1/*
2 * Pull in the generic implementation for the mutex fastpath.
3 *
4 * TODO: implement optimized primitives instead, or leave the generic
5 * implementation in place, or pick the atomic_xchg() based generic
6 * implementation. (see asm-generic/mutex-xchg.h for details)
7 */
8
9#include <asm-generic/mutex-dec.h>
diff --git a/arch/h8300/include/asm/mutex.h b/arch/h8300/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/arch/h8300/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
1/*
2 * Pull in the generic implementation for the mutex fastpath.
3 *
4 * TODO: implement optimized primitives instead, or leave the generic
5 * implementation in place, or pick the atomic_xchg() based generic
6 * implementation. (see asm-generic/mutex-xchg.h for details)
7 */
8
9#include <asm-generic/mutex-dec.h>
diff --git a/arch/hexagon/include/asm/mutex.h b/arch/hexagon/include/asm/mutex.h
deleted file mode 100644
index 58b52de1bc22..000000000000
--- a/arch/hexagon/include/asm/mutex.h
+++ /dev/null
@@ -1,8 +0,0 @@
1/*
2 * Pull in the generic implementation for the mutex fastpath.
3 *
4 * TODO: implement optimized primitives instead, or leave the generic
5 * implementation in place, or pick the atomic_xchg() based generic
6 * implementation. (see asm-generic/mutex-xchg.h for details)
7 */
8#include <asm-generic/mutex-xchg.h>
diff --git a/arch/ia64/include/asm/mutex.h b/arch/ia64/include/asm/mutex.h
deleted file mode 100644
index 28cb819e0ff9..000000000000
--- a/arch/ia64/include/asm/mutex.h
+++ /dev/null
@@ -1,90 +0,0 @@
1/*
2 * ia64 implementation of the mutex fastpath.
3 *
4 * Copyright (C) 2006 Ken Chen <kenneth.w.chen@intel.com>
5 *
6 */
7
8#ifndef _ASM_MUTEX_H
9#define _ASM_MUTEX_H
10
11/**
12 * __mutex_fastpath_lock - try to take the lock by moving the count
13 * from 1 to a 0 value
14 * @count: pointer of type atomic_t
15 * @fail_fn: function to call if the original value was not 1
16 *
17 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
18 * it wasn't 1 originally. This function MUST leave the value lower than
19 * 1 even when the "1" assertion wasn't true.
20 */
21static inline void
22__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
23{
24 if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
25 fail_fn(count);
26}
27
28/**
29 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
30 * from 1 to a 0 value
31 * @count: pointer of type atomic_t
32 *
33 * Change the count from 1 to a value lower than 1. This function returns 0
34 * if the fastpath succeeds, or -1 otherwise.
35 */
36static inline int
37__mutex_fastpath_lock_retval(atomic_t *count)
38{
39 if (unlikely(ia64_fetchadd4_acq(count, -1) != 1))
40 return -1;
41 return 0;
42}
43
44/**
45 * __mutex_fastpath_unlock - try to promote the count from 0 to 1
46 * @count: pointer of type atomic_t
47 * @fail_fn: function to call if the original value was not 0
48 *
49 * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
50 * In the failure case, this function is allowed to either set the value to
51 * 1, or to set it to a value lower than 1.
52 *
53 * If the implementation sets it to a value of lower than 1, then the
54 * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
55 * to return 0 otherwise.
56 */
57static inline void
58__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
59{
60 int ret = ia64_fetchadd4_rel(count, 1);
61 if (unlikely(ret < 0))
62 fail_fn(count);
63}
64
65#define __mutex_slowpath_needs_to_unlock() 1
66
67/**
68 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
69 *
70 * @count: pointer of type atomic_t
71 * @fail_fn: fallback function
72 *
73 * Change the count from 1 to a value lower than 1, and return 0 (failure)
74 * if it wasn't 1 originally, or return 1 (success) otherwise. This function
75 * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
76 * Additionally, if the value was < 0 originally, this function must not leave
77 * it to 0 on failure.
78 *
79 * If the architecture has no effective trylock variant, it should call the
80 * <fail_fn> spinlock-based trylock variant unconditionally.
81 */
82static inline int
83__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
84{
85 if (atomic_read(count) == 1 && cmpxchg_acq(count, 1, 0) == 1)
86 return 1;
87 return 0;
88}
89
90#endif
diff --git a/arch/m32r/include/asm/mutex.h b/arch/m32r/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/arch/m32r/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
1/*
2 * Pull in the generic implementation for the mutex fastpath.
3 *
4 * TODO: implement optimized primitives instead, or leave the generic
5 * implementation in place, or pick the atomic_xchg() based generic
6 * implementation. (see asm-generic/mutex-xchg.h for details)
7 */
8
9#include <asm-generic/mutex-dec.h>
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index eb85bd9c6180..1f2e5d31cb24 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -20,7 +20,6 @@ generic-y += local64.h
20generic-y += mcs_spinlock.h 20generic-y += mcs_spinlock.h
21generic-y += mm-arch-hooks.h 21generic-y += mm-arch-hooks.h
22generic-y += mman.h 22generic-y += mman.h
23generic-y += mutex.h
24generic-y += percpu.h 23generic-y += percpu.h
25generic-y += preempt.h 24generic-y += preempt.h
26generic-y += resource.h 25generic-y += resource.h
diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild
index 29acb89daaaa..167150c701d1 100644
--- a/arch/metag/include/asm/Kbuild
+++ b/arch/metag/include/asm/Kbuild
@@ -27,7 +27,6 @@ generic-y += local64.h
27generic-y += mcs_spinlock.h 27generic-y += mcs_spinlock.h
28generic-y += mm-arch-hooks.h 28generic-y += mm-arch-hooks.h
29generic-y += msgbuf.h 29generic-y += msgbuf.h
30generic-y += mutex.h
31generic-y += param.h 30generic-y += param.h
32generic-y += pci.h 31generic-y += pci.h
33generic-y += percpu.h 32generic-y += percpu.h
diff --git a/arch/microblaze/include/asm/mutex.h b/arch/microblaze/include/asm/mutex.h
deleted file mode 100644
index ff6101aa2c71..000000000000
--- a/arch/microblaze/include/asm/mutex.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/mutex-dec.h>
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 9740066cc631..3269b742a75e 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -9,7 +9,6 @@ generic-y += irq_work.h
9generic-y += local64.h 9generic-y += local64.h
10generic-y += mcs_spinlock.h 10generic-y += mcs_spinlock.h
11generic-y += mm-arch-hooks.h 11generic-y += mm-arch-hooks.h
12generic-y += mutex.h
13generic-y += parport.h 12generic-y += parport.h
14generic-y += percpu.h 13generic-y += percpu.h
15generic-y += preempt.h 14generic-y += preempt.h
diff --git a/arch/mn10300/include/asm/mutex.h b/arch/mn10300/include/asm/mutex.h
deleted file mode 100644
index 84f5490c6fb4..000000000000
--- a/arch/mn10300/include/asm/mutex.h
+++ /dev/null
@@ -1,16 +0,0 @@
1/* MN10300 Mutex fastpath
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 *
11 *
12 * TODO: implement optimized primitives instead, or leave the generic
13 * implementation in place, or pick the atomic_xchg() based generic
14 * implementation. (see asm-generic/mutex-xchg.h for details)
15 */
16#include <asm-generic/mutex-null.h>
diff --git a/arch/nios2/include/asm/mutex.h b/arch/nios2/include/asm/mutex.h
deleted file mode 100644
index ff6101aa2c71..000000000000
--- a/arch/nios2/include/asm/mutex.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/mutex-dec.h>
diff --git a/arch/openrisc/include/asm/mutex.h b/arch/openrisc/include/asm/mutex.h
deleted file mode 100644
index b85a0cfa9fc9..000000000000
--- a/arch/openrisc/include/asm/mutex.h
+++ /dev/null
@@ -1,27 +0,0 @@
1/*
2 * OpenRISC Linux
3 *
4 * Linux architectural port borrowing liberally from similar works of
5 * others. All original copyrights apply as per the original source
6 * declaration.
7 *
8 * OpenRISC implementation:
9 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
10 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
11 * et al.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 */
18
19/*
20 * Pull in the generic implementation for the mutex fastpath.
21 *
22 * TODO: implement optimized primitives instead, or leave the generic
23 * implementation in place, or pick the atomic_xchg() based generic
24 * implementation. (see asm-generic/mutex-xchg.h for details)
25 */
26
27#include <asm-generic/mutex-dec.h>
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index f9b3a81aefcd..91f53c07f410 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -16,7 +16,6 @@ generic-y += local.h
16generic-y += local64.h 16generic-y += local64.h
17generic-y += mcs_spinlock.h 17generic-y += mcs_spinlock.h
18generic-y += mm-arch-hooks.h 18generic-y += mm-arch-hooks.h
19generic-y += mutex.h
20generic-y += param.h 19generic-y += param.h
21generic-y += percpu.h 20generic-y += percpu.h
22generic-y += poll.h 21generic-y += poll.h
diff --git a/arch/powerpc/include/asm/mutex.h b/arch/powerpc/include/asm/mutex.h
deleted file mode 100644
index 078155fa1189..000000000000
--- a/arch/powerpc/include/asm/mutex.h
+++ /dev/null
@@ -1,132 +0,0 @@
1/*
2 * Optimised mutex implementation of include/asm-generic/mutex-dec.h algorithm
3 */
4#ifndef _ASM_POWERPC_MUTEX_H
5#define _ASM_POWERPC_MUTEX_H
6
7static inline int __mutex_cmpxchg_lock(atomic_t *v, int old, int new)
8{
9 int t;
10
11 __asm__ __volatile__ (
12"1: lwarx %0,0,%1 # mutex trylock\n\
13 cmpw 0,%0,%2\n\
14 bne- 2f\n"
15 PPC405_ERR77(0,%1)
16" stwcx. %3,0,%1\n\
17 bne- 1b"
18 PPC_ACQUIRE_BARRIER
19 "\n\
202:"
21 : "=&r" (t)
22 : "r" (&v->counter), "r" (old), "r" (new)
23 : "cc", "memory");
24
25 return t;
26}
27
28static inline int __mutex_dec_return_lock(atomic_t *v)
29{
30 int t;
31
32 __asm__ __volatile__(
33"1: lwarx %0,0,%1 # mutex lock\n\
34 addic %0,%0,-1\n"
35 PPC405_ERR77(0,%1)
36" stwcx. %0,0,%1\n\
37 bne- 1b"
38 PPC_ACQUIRE_BARRIER
39 : "=&r" (t)
40 : "r" (&v->counter)
41 : "cc", "memory");
42
43 return t;
44}
45
46static inline int __mutex_inc_return_unlock(atomic_t *v)
47{
48 int t;
49
50 __asm__ __volatile__(
51 PPC_RELEASE_BARRIER
52"1: lwarx %0,0,%1 # mutex unlock\n\
53 addic %0,%0,1\n"
54 PPC405_ERR77(0,%1)
55" stwcx. %0,0,%1 \n\
56 bne- 1b"
57 : "=&r" (t)
58 : "r" (&v->counter)
59 : "cc", "memory");
60
61 return t;
62}
63
64/**
65 * __mutex_fastpath_lock - try to take the lock by moving the count
66 * from 1 to a 0 value
67 * @count: pointer of type atomic_t
68 * @fail_fn: function to call if the original value was not 1
69 *
70 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
71 * it wasn't 1 originally. This function MUST leave the value lower than
72 * 1 even when the "1" assertion wasn't true.
73 */
74static inline void
75__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
76{
77 if (unlikely(__mutex_dec_return_lock(count) < 0))
78 fail_fn(count);
79}
80
81/**
82 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
83 * from 1 to a 0 value
84 * @count: pointer of type atomic_t
85 *
86 * Change the count from 1 to a value lower than 1. This function returns 0
87 * if the fastpath succeeds, or -1 otherwise.
88 */
89static inline int
90__mutex_fastpath_lock_retval(atomic_t *count)
91{
92 if (unlikely(__mutex_dec_return_lock(count) < 0))
93 return -1;
94 return 0;
95}
96
97/**
98 * __mutex_fastpath_unlock - try to promote the count from 0 to 1
99 * @count: pointer of type atomic_t
100 * @fail_fn: function to call if the original value was not 0
101 *
102 * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
103 * In the failure case, this function is allowed to either set the value to
104 * 1, or to set it to a value lower than 1.
105 */
106static inline void
107__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
108{
109 if (unlikely(__mutex_inc_return_unlock(count) <= 0))
110 fail_fn(count);
111}
112
113#define __mutex_slowpath_needs_to_unlock() 1
114
115/**
116 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
117 *
118 * @count: pointer of type atomic_t
119 * @fail_fn: fallback function
120 *
121 * Change the count from 1 to 0, and return 1 (success), or if the count
122 * was not 1, then return 0 (failure).
123 */
124static inline int
125__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
126{
127 if (likely(atomic_read(count) == 1 && __mutex_cmpxchg_lock(count, 1, 0) == 1))
128 return 1;
129 return 0;
130}
131
132#endif
diff --git a/arch/s390/include/asm/mutex.h b/arch/s390/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/arch/s390/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
1/*
2 * Pull in the generic implementation for the mutex fastpath.
3 *
4 * TODO: implement optimized primitives instead, or leave the generic
5 * implementation in place, or pick the atomic_xchg() based generic
6 * implementation. (see asm-generic/mutex-xchg.h for details)
7 */
8
9#include <asm-generic/mutex-dec.h>
diff --git a/arch/score/include/asm/mutex.h b/arch/score/include/asm/mutex.h
deleted file mode 100644
index 10d48fe4db97..000000000000
--- a/arch/score/include/asm/mutex.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _ASM_SCORE_MUTEX_H
2#define _ASM_SCORE_MUTEX_H
3
4#include <asm-generic/mutex-dec.h>
5
6#endif /* _ASM_SCORE_MUTEX_H */
diff --git a/arch/sh/include/asm/mutex-llsc.h b/arch/sh/include/asm/mutex-llsc.h
deleted file mode 100644
index dad29b687bd3..000000000000
--- a/arch/sh/include/asm/mutex-llsc.h
+++ /dev/null
@@ -1,109 +0,0 @@
1/*
2 * arch/sh/include/asm/mutex-llsc.h
3 *
4 * SH-4A optimized mutex locking primitives
5 *
6 * Please look into asm-generic/mutex-xchg.h for a formal definition.
7 */
8#ifndef __ASM_SH_MUTEX_LLSC_H
9#define __ASM_SH_MUTEX_LLSC_H
10
11/*
12 * Attempting to lock a mutex on SH4A is done like in ARMv6+ architecure.
13 * with a bastardized atomic decrement (it is not a reliable atomic decrement
14 * but it satisfies the defined semantics for our purpose, while being
15 * smaller and faster than a real atomic decrement or atomic swap.
16 * The idea is to attempt decrementing the lock value only once. If once
17 * decremented it isn't zero, or if its store-back fails due to a dispute
18 * on the exclusive store, we simply bail out immediately through the slow
19 * path where the lock will be reattempted until it succeeds.
20 */
21static inline void
22__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
23{
24 int __done, __res;
25
26 __asm__ __volatile__ (
27 "movli.l @%2, %0 \n"
28 "add #-1, %0 \n"
29 "movco.l %0, @%2 \n"
30 "movt %1 \n"
31 : "=&z" (__res), "=&r" (__done)
32 : "r" (&(count)->counter)
33 : "t");
34
35 if (unlikely(!__done || __res != 0))
36 fail_fn(count);
37}
38
39static inline int
40__mutex_fastpath_lock_retval(atomic_t *count)
41{
42 int __done, __res;
43
44 __asm__ __volatile__ (
45 "movli.l @%2, %0 \n"
46 "add #-1, %0 \n"
47 "movco.l %0, @%2 \n"
48 "movt %1 \n"
49 : "=&z" (__res), "=&r" (__done)
50 : "r" (&(count)->counter)
51 : "t");
52
53 if (unlikely(!__done || __res != 0))
54 __res = -1;
55
56 return __res;
57}
58
59static inline void
60__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
61{
62 int __done, __res;
63
64 __asm__ __volatile__ (
65 "movli.l @%2, %0 \n\t"
66 "add #1, %0 \n\t"
67 "movco.l %0, @%2 \n\t"
68 "movt %1 \n\t"
69 : "=&z" (__res), "=&r" (__done)
70 : "r" (&(count)->counter)
71 : "t");
72
73 if (unlikely(!__done || __res <= 0))
74 fail_fn(count);
75}
76
77/*
78 * If the unlock was done on a contended lock, or if the unlock simply fails
79 * then the mutex remains locked.
80 */
81#define __mutex_slowpath_needs_to_unlock() 1
82
83/*
84 * For __mutex_fastpath_trylock we do an atomic decrement and check the
85 * result and put it in the __res variable.
86 */
87static inline int
88__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
89{
90 int __res, __orig;
91
92 __asm__ __volatile__ (
93 "1: movli.l @%2, %0 \n\t"
94 "dt %0 \n\t"
95 "movco.l %0,@%2 \n\t"
96 "bf 1b \n\t"
97 "cmp/eq #0,%0 \n\t"
98 "bt 2f \n\t"
99 "mov #0, %1 \n\t"
100 "bf 3f \n\t"
101 "2: mov #1, %1 \n\t"
102 "3: "
103 : "=&z" (__orig), "=&r" (__res)
104 : "r" (&count->counter)
105 : "t");
106
107 return __res;
108}
109#endif /* __ASM_SH_MUTEX_LLSC_H */
diff --git a/arch/sh/include/asm/mutex.h b/arch/sh/include/asm/mutex.h
deleted file mode 100644
index d8e37716a4a0..000000000000
--- a/arch/sh/include/asm/mutex.h
+++ /dev/null
@@ -1,12 +0,0 @@
1/*
2 * Pull in the generic implementation for the mutex fastpath.
3 *
4 * TODO: implement optimized primitives instead, or leave the generic
5 * implementation in place, or pick the atomic_xchg() based generic
6 * implementation. (see asm-generic/mutex-xchg.h for details)
7 */
8#if defined(CONFIG_CPU_SH4A)
9#include <asm/mutex-llsc.h>
10#else
11#include <asm-generic/mutex-dec.h>
12#endif
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index cfc918067f80..0569bfac4afb 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -15,7 +15,6 @@ generic-y += local64.h
15generic-y += mcs_spinlock.h 15generic-y += mcs_spinlock.h
16generic-y += mm-arch-hooks.h 16generic-y += mm-arch-hooks.h
17generic-y += module.h 17generic-y += module.h
18generic-y += mutex.h
19generic-y += preempt.h 18generic-y += preempt.h
20generic-y += rwsem.h 19generic-y += rwsem.h
21generic-y += serial.h 20generic-y += serial.h
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index ba35c41c71ff..2d1f5638974c 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -21,7 +21,6 @@ generic-y += local64.h
21generic-y += mcs_spinlock.h 21generic-y += mcs_spinlock.h
22generic-y += mm-arch-hooks.h 22generic-y += mm-arch-hooks.h
23generic-y += msgbuf.h 23generic-y += msgbuf.h
24generic-y += mutex.h
25generic-y += param.h 24generic-y += param.h
26generic-y += parport.h 25generic-y += parport.h
27generic-y += poll.h 26generic-y += poll.h
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index 904f3ebf4220..052f7f6d0551 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -17,7 +17,6 @@ generic-y += irq_work.h
17generic-y += kdebug.h 17generic-y += kdebug.h
18generic-y += mcs_spinlock.h 18generic-y += mcs_spinlock.h
19generic-y += mm-arch-hooks.h 19generic-y += mm-arch-hooks.h
20generic-y += mutex.h
21generic-y += param.h 20generic-y += param.h
22generic-y += pci.h 21generic-y += pci.h
23generic-y += percpu.h 22generic-y += percpu.h
diff --git a/arch/unicore32/include/asm/mutex.h b/arch/unicore32/include/asm/mutex.h
deleted file mode 100644
index fab7d0e8adf6..000000000000
--- a/arch/unicore32/include/asm/mutex.h
+++ /dev/null
@@ -1,20 +0,0 @@
1/*
2 * linux/arch/unicore32/include/asm/mutex.h
3 *
4 * Code specific to PKUnity SoC and UniCore ISA
5 *
6 * Copyright (C) 2001-2010 GUAN Xue-tao
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * UniCore optimized mutex locking primitives
13 *
14 * Please look into asm-generic/mutex-xchg.h for a formal definition.
15 */
16#ifndef __UNICORE_MUTEX_H__
17#define __UNICORE_MUTEX_H__
18
19# include <asm-generic/mutex-xchg.h>
20#endif
diff --git a/arch/x86/include/asm/mutex.h b/arch/x86/include/asm/mutex.h
deleted file mode 100644
index 7d3a48275394..000000000000
--- a/arch/x86/include/asm/mutex.h
+++ /dev/null
@@ -1,5 +0,0 @@
1#ifdef CONFIG_X86_32
2# include <asm/mutex_32.h>
3#else
4# include <asm/mutex_64.h>
5#endif
diff --git a/arch/x86/include/asm/mutex_32.h b/arch/x86/include/asm/mutex_32.h
deleted file mode 100644
index e9355a84fc67..000000000000
--- a/arch/x86/include/asm/mutex_32.h
+++ /dev/null
@@ -1,110 +0,0 @@
1/*
2 * Assembly implementation of the mutex fastpath, based on atomic
3 * decrement/increment.
4 *
5 * started by Ingo Molnar:
6 *
7 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
8 */
9#ifndef _ASM_X86_MUTEX_32_H
10#define _ASM_X86_MUTEX_32_H
11
12#include <asm/alternative.h>
13
14/**
15 * __mutex_fastpath_lock - try to take the lock by moving the count
16 * from 1 to a 0 value
17 * @count: pointer of type atomic_t
18 * @fn: function to call if the original value was not 1
19 *
20 * Change the count from 1 to a value lower than 1, and call <fn> if it
21 * wasn't 1 originally. This function MUST leave the value lower than 1
22 * even when the "1" assertion wasn't true.
23 */
24#define __mutex_fastpath_lock(count, fail_fn) \
25do { \
26 unsigned int dummy; \
27 \
28 typecheck(atomic_t *, count); \
29 typecheck_fn(void (*)(atomic_t *), fail_fn); \
30 \
31 asm volatile(LOCK_PREFIX " decl (%%eax)\n" \
32 " jns 1f \n" \
33 " call " #fail_fn "\n" \
34 "1:\n" \
35 : "=a" (dummy) \
36 : "a" (count) \
37 : "memory", "ecx", "edx"); \
38} while (0)
39
40
41/**
42 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
43 * from 1 to a 0 value
44 * @count: pointer of type atomic_t
45 *
46 * Change the count from 1 to a value lower than 1. This function returns 0
47 * if the fastpath succeeds, or -1 otherwise.
48 */
49static inline int __mutex_fastpath_lock_retval(atomic_t *count)
50{
51 if (unlikely(atomic_dec_return(count) < 0))
52 return -1;
53 else
54 return 0;
55}
56
57/**
58 * __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
59 * @count: pointer of type atomic_t
60 * @fail_fn: function to call if the original value was not 0
61 *
62 * try to promote the mutex from 0 to 1. if it wasn't 0, call <fail_fn>.
63 * In the failure case, this function is allowed to either set the value
64 * to 1, or to set it to a value lower than 1.
65 *
66 * If the implementation sets it to a value of lower than 1, the
67 * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
68 * to return 0 otherwise.
69 */
70#define __mutex_fastpath_unlock(count, fail_fn) \
71do { \
72 unsigned int dummy; \
73 \
74 typecheck(atomic_t *, count); \
75 typecheck_fn(void (*)(atomic_t *), fail_fn); \
76 \
77 asm volatile(LOCK_PREFIX " incl (%%eax)\n" \
78 " jg 1f\n" \
79 " call " #fail_fn "\n" \
80 "1:\n" \
81 : "=a" (dummy) \
82 : "a" (count) \
83 : "memory", "ecx", "edx"); \
84} while (0)
85
86#define __mutex_slowpath_needs_to_unlock() 1
87
88/**
89 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
90 *
91 * @count: pointer of type atomic_t
92 * @fail_fn: fallback function
93 *
94 * Change the count from 1 to a value lower than 1, and return 0 (failure)
95 * if it wasn't 1 originally, or return 1 (success) otherwise. This function
96 * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
97 * Additionally, if the value was < 0 originally, this function must not leave
98 * it to 0 on failure.
99 */
100static inline int __mutex_fastpath_trylock(atomic_t *count,
101 int (*fail_fn)(atomic_t *))
102{
103 /* cmpxchg because it never induces a false contention state. */
104 if (likely(atomic_read(count) == 1 && atomic_cmpxchg(count, 1, 0) == 1))
105 return 1;
106
107 return 0;
108}
109
110#endif /* _ASM_X86_MUTEX_32_H */
diff --git a/arch/x86/include/asm/mutex_64.h b/arch/x86/include/asm/mutex_64.h
deleted file mode 100644
index d9850758464e..000000000000
--- a/arch/x86/include/asm/mutex_64.h
+++ /dev/null
@@ -1,127 +0,0 @@
1/*
2 * Assembly implementation of the mutex fastpath, based on atomic
3 * decrement/increment.
4 *
5 * started by Ingo Molnar:
6 *
7 * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
8 */
9#ifndef _ASM_X86_MUTEX_64_H
10#define _ASM_X86_MUTEX_64_H
11
/**
 * __mutex_fastpath_lock - decrement and call function if negative
 * @v: pointer of type atomic_t
 * @fail_fn: function to call if the result is negative
 *
 * Atomically decrements @v and calls <fail_fn> if the result is negative.
 *
 * Two variants: with asm goto support, the fastpath branches directly
 * to the exit label when the decrement leaves a non-negative value
 * ("jns"), so the common case contains no call at all; the fallback
 * macro instead emits the slowpath "call" inline.  In the fallback,
 * @v is pinned in %rdi (constraint "D"), which is also the first
 * argument register of the SysV AMD64 ABI, so <fail_fn> receives the
 * counter for free; every other caller-saved register is listed as
 * clobbered because of that potential call.  #fail_fn is stringified,
 * so the argument must be a function symbol, not a pointer.
 */
#ifdef CC_HAVE_ASM_GOTO
static inline void __mutex_fastpath_lock(atomic_t *v,
					 void (*fail_fn)(atomic_t *))
{
	asm_volatile_goto(LOCK_PREFIX " decl %0\n"
			  " jns %l[exit]\n"
			  : : "m" (v->counter)
			  : "memory", "cc"
			  : exit);
	fail_fn(v);
exit:
	return;
}
#else
#define __mutex_fastpath_lock(v, fail_fn)			\
do {								\
	unsigned long dummy;					\
								\
	typecheck(atomic_t *, v);				\
	typecheck_fn(void (*)(atomic_t *), fail_fn);		\
								\
	asm volatile(LOCK_PREFIX " decl (%%rdi)\n"		\
		     " jns 1f \n"				\
		     " call " #fail_fn "\n"			\
		     "1:"					\
		     : "=D" (dummy)				\
		     : "D" (v)					\
		     : "rax", "rsi", "rdx", "rcx",		\
		       "r8", "r9", "r10", "r11", "memory");	\
} while (0)
#endif
50
51/**
52 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
53 * from 1 to a 0 value
54 * @count: pointer of type atomic_t
55 *
56 * Change the count from 1 to a value lower than 1. This function returns 0
57 * if the fastpath succeeds, or -1 otherwise.
58 */
59static inline int __mutex_fastpath_lock_retval(atomic_t *count)
60{
61 if (unlikely(atomic_dec_return(count) < 0))
62 return -1;
63 else
64 return 0;
65}
66
/**
 * __mutex_fastpath_unlock - increment and call function if nonpositive
 * @v: pointer of type atomic_t
 * @fail_fn: function to call if the result is nonpositive
 *
 * Atomically increments @v and calls <fail_fn> if the result is nonpositive.
 *
 * Mirrors __mutex_fastpath_lock(): the asm-goto variant jumps straight
 * to the exit label when the incremented value is positive ("jg", i.e.
 * no waiters), while the fallback macro emits the slowpath "call"
 * inline.  @v travels in %rdi (the first SysV AMD64 argument register),
 * and the remaining caller-saved registers are clobbered on account of
 * the potential call into <fail_fn>; #fail_fn is stringified, so it
 * must be a function symbol.
 */
#ifdef CC_HAVE_ASM_GOTO
static inline void __mutex_fastpath_unlock(atomic_t *v,
					   void (*fail_fn)(atomic_t *))
{
	asm_volatile_goto(LOCK_PREFIX " incl %0\n"
			  " jg %l[exit]\n"
			  : : "m" (v->counter)
			  : "memory", "cc"
			  : exit);
	fail_fn(v);
exit:
	return;
}
#else
#define __mutex_fastpath_unlock(v, fail_fn)			\
do {								\
	unsigned long dummy;					\
								\
	typecheck(atomic_t *, v);				\
	typecheck_fn(void (*)(atomic_t *), fail_fn);		\
								\
	asm volatile(LOCK_PREFIX " incl (%%rdi)\n"		\
		     " jg 1f\n"					\
		     " call " #fail_fn "\n"			\
		     "1:"					\
		     : "=D" (dummy)				\
		     : "D" (v)					\
		     : "rax", "rsi", "rdx", "rcx",		\
		       "r8", "r9", "r10", "r11", "memory");	\
} while (0)
#endif

/*
 * The unlock fastpath may leave the count nonpositive on contention,
 * so the slowpath still has to perform the actual unlock.
 */
#define __mutex_slowpath_needs_to_unlock()	1
107
108/**
109 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
110 *
111 * @count: pointer of type atomic_t
112 * @fail_fn: fallback function
113 *
114 * Change the count from 1 to 0 and return 1 (success), or return 0 (failure)
115 * if it wasn't 1 originally. [the fallback function is never used on
116 * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
117 */
118static inline int __mutex_fastpath_trylock(atomic_t *count,
119 int (*fail_fn)(atomic_t *))
120{
121 if (likely(atomic_read(count) == 1 && atomic_cmpxchg(count, 1, 0) == 1))
122 return 1;
123
124 return 0;
125}
126
127#endif /* _ASM_X86_MUTEX_64_H */
diff --git a/arch/xtensa/include/asm/mutex.h b/arch/xtensa/include/asm/mutex.h
deleted file mode 100644
index 458c1f7fbc18..000000000000
--- a/arch/xtensa/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
1/*
2 * Pull in the generic implementation for the mutex fastpath.
3 *
4 * TODO: implement optimized primitives instead, or leave the generic
5 * implementation in place, or pick the atomic_xchg() based generic
6 * implementation. (see asm-generic/mutex-xchg.h for details)
7 */
8
9#include <asm-generic/mutex-dec.h>
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
deleted file mode 100644
index c54829d3de37..000000000000
--- a/include/asm-generic/mutex-dec.h
+++ /dev/null
@@ -1,88 +0,0 @@
1/*
2 * include/asm-generic/mutex-dec.h
3 *
4 * Generic implementation of the mutex fastpath, based on atomic
5 * decrement/increment.
6 */
7#ifndef _ASM_GENERIC_MUTEX_DEC_H
8#define _ASM_GENERIC_MUTEX_DEC_H
9
10/**
11 * __mutex_fastpath_lock - try to take the lock by moving the count
12 * from 1 to a 0 value
13 * @count: pointer of type atomic_t
14 * @fail_fn: function to call if the original value was not 1
15 *
16 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
17 * it wasn't 1 originally. This function MUST leave the value lower than
18 * 1 even when the "1" assertion wasn't true.
19 */
20static inline void
21__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
22{
23 if (unlikely(atomic_dec_return_acquire(count) < 0))
24 fail_fn(count);
25}
26
27/**
28 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
29 * from 1 to a 0 value
30 * @count: pointer of type atomic_t
31 *
32 * Change the count from 1 to a value lower than 1. This function returns 0
33 * if the fastpath succeeds, or -1 otherwise.
34 */
35static inline int
36__mutex_fastpath_lock_retval(atomic_t *count)
37{
38 if (unlikely(atomic_dec_return_acquire(count) < 0))
39 return -1;
40 return 0;
41}
42
43/**
44 * __mutex_fastpath_unlock - try to promote the count from 0 to 1
45 * @count: pointer of type atomic_t
46 * @fail_fn: function to call if the original value was not 0
47 *
48 * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
49 * In the failure case, this function is allowed to either set the value to
50 * 1, or to set it to a value lower than 1.
51 *
52 * If the implementation sets it to a value of lower than 1, then the
53 * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
54 * to return 0 otherwise.
55 */
56static inline void
57__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
58{
59 if (unlikely(atomic_inc_return_release(count) <= 0))
60 fail_fn(count);
61}
62
63#define __mutex_slowpath_needs_to_unlock() 1
64
65/**
66 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
67 *
68 * @count: pointer of type atomic_t
69 * @fail_fn: fallback function
70 *
71 * Change the count from 1 to a value lower than 1, and return 0 (failure)
72 * if it wasn't 1 originally, or return 1 (success) otherwise. This function
73 * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
74 * Additionally, if the value was < 0 originally, this function must not leave
75 * it to 0 on failure.
76 *
77 * If the architecture has no effective trylock variant, it should call the
78 * <fail_fn> spinlock-based trylock variant unconditionally.
79 */
80static inline int
81__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
82{
83 if (likely(atomic_read(count) == 1 && atomic_cmpxchg_acquire(count, 1, 0) == 1))
84 return 1;
85 return 0;
86}
87
88#endif
diff --git a/include/asm-generic/mutex-null.h b/include/asm-generic/mutex-null.h
deleted file mode 100644
index 61069ed334e2..000000000000
--- a/include/asm-generic/mutex-null.h
+++ /dev/null
@@ -1,19 +0,0 @@
1/*
2 * include/asm-generic/mutex-null.h
3 *
4 * Generic implementation of the mutex fastpath, based on NOP :-)
5 *
6 * This is used by the mutex-debugging infrastructure, but it can also
7 * be used by architectures that (for whatever reason) want to use the
8 * spinlock based slowpath.
9 */
10#ifndef _ASM_GENERIC_MUTEX_NULL_H
11#define _ASM_GENERIC_MUTEX_NULL_H
12
/*
 * NOP fastpaths: every operation falls straight through to the
 * spinlock-based slowpath (<fail_fn>), and the retval variant simply
 * reports failure so its caller takes the slowpath too.  Since no
 * fastpath ever changes the count, the slowpath always has to perform
 * the real unlock, hence __mutex_slowpath_needs_to_unlock() == 1.
 */
#define __mutex_fastpath_lock(count, fail_fn)		fail_fn(count)
#define __mutex_fastpath_lock_retval(count)		(-1)
#define __mutex_fastpath_unlock(count, fail_fn)		fail_fn(count)
#define __mutex_fastpath_trylock(count, fail_fn)	fail_fn(count)
#define __mutex_slowpath_needs_to_unlock()		1
18
19#endif
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
deleted file mode 100644
index 3269ec4e195f..000000000000
--- a/include/asm-generic/mutex-xchg.h
+++ /dev/null
@@ -1,120 +0,0 @@
1/*
2 * include/asm-generic/mutex-xchg.h
3 *
4 * Generic implementation of the mutex fastpath, based on xchg().
5 *
6 * NOTE: An xchg based implementation might be less optimal than an atomic
7 * decrement/increment based implementation. If your architecture
8 * has a reasonable atomic dec/inc then you should probably use
9 * asm-generic/mutex-dec.h instead, or you could open-code an
10 * optimized version in asm/mutex.h.
11 */
12#ifndef _ASM_GENERIC_MUTEX_XCHG_H
13#define _ASM_GENERIC_MUTEX_XCHG_H
14
15/**
16 * __mutex_fastpath_lock - try to take the lock by moving the count
17 * from 1 to a 0 value
18 * @count: pointer of type atomic_t
19 * @fail_fn: function to call if the original value was not 1
20 *
21 * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
22 * wasn't 1 originally. This function MUST leave the value lower than 1
23 * even when the "1" assertion wasn't true.
24 */
25static inline void
26__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
27{
28 if (unlikely(atomic_xchg(count, 0) != 1))
29 /*
30 * We failed to acquire the lock, so mark it contended
31 * to ensure that any waiting tasks are woken up by the
32 * unlock slow path.
33 */
34 if (likely(atomic_xchg_acquire(count, -1) != 1))
35 fail_fn(count);
36}
37
38/**
39 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
40 * from 1 to a 0 value
41 * @count: pointer of type atomic_t
42 *
43 * Change the count from 1 to a value lower than 1. This function returns 0
44 * if the fastpath succeeds, or -1 otherwise.
45 */
46static inline int
47__mutex_fastpath_lock_retval(atomic_t *count)
48{
49 if (unlikely(atomic_xchg_acquire(count, 0) != 1))
50 if (likely(atomic_xchg(count, -1) != 1))
51 return -1;
52 return 0;
53}
54
55/**
56 * __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
57 * @count: pointer of type atomic_t
58 * @fail_fn: function to call if the original value was not 0
59 *
60 * try to promote the mutex from 0 to 1. if it wasn't 0, call <function>
61 * In the failure case, this function is allowed to either set the value to
62 * 1, or to set it to a value lower than one.
63 * If the implementation sets it to a value of lower than one, the
64 * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
65 * to return 0 otherwise.
66 */
67static inline void
68__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
69{
70 if (unlikely(atomic_xchg_release(count, 1) != 0))
71 fail_fn(count);
72}
73
74#define __mutex_slowpath_needs_to_unlock() 0
75
/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 *
 * @count: pointer of type atomic_t
 * @fail_fn: spinlock based trylock implementation (unused by this
 *           xchg-based variant; kept for interface compatibility)
 *
 * Change the count from 1 to a value lower than 1, and return 0 (failure)
 * if it wasn't 1 originally, or return 1 (success) otherwise. This function
 * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
 * Additionally, if the value was < 0 originally, this function must not leave
 * it to 0 on failure.
 *
 * If the architecture has no effective trylock variant, it should call the
 * <fail_fn> spinlock-based trylock variant unconditionally.
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	int prev;

	/* Cheap early-out: visibly locked or contended. */
	if (atomic_read(count) != 1)
		return 0;

	/* Claim attempt: only a pre-value of 1 means we got the lock. */
	prev = atomic_xchg_acquire(count, 0);
	if (unlikely(prev < 0)) {
		/*
		 * The lock was marked contended (negative), but our xchg
		 * overwrote that with 0; we must restore the contended
		 * state so waiters still get woken.  If while doing so we
		 * get back a prev value of 1 then we just own it.
		 *
		 * [ In the rare case of the mutex going to 1, to 0, to -1
		 *   and then back to 0 in this few-instructions window,
		 *   this has the potential to trigger the slowpath for the
		 *   owner's unlock path needlessly, but that's not a problem
		 *   in practice. ]
		 */
		prev = atomic_xchg_acquire(count, prev);
		if (prev < 0)
			prev = 0;	/* still contended: report failure */
	}

	return prev;
}
119
120#endif
diff --git a/include/asm-generic/mutex.h b/include/asm-generic/mutex.h
deleted file mode 100644
index fe91ab502793..000000000000
--- a/include/asm-generic/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
1#ifndef __ASM_GENERIC_MUTEX_H
2#define __ASM_GENERIC_MUTEX_H
3/*
4 * Pull in the generic implementation for the mutex fastpath,
5 * which is a reasonable default on many architectures.
6 */
7
8#include <asm-generic/mutex-dec.h>
9#endif /* __ASM_GENERIC_MUTEX_H */