-rw-r--r--  MAINTAINERS                      |   1
-rw-r--r--  arch/alpha/include/asm/rwsem.h   | 211
-rw-r--r--  arch/arm/include/asm/Kbuild      |   1
-rw-r--r--  arch/arm64/include/asm/Kbuild    |   1
-rw-r--r--  arch/hexagon/include/asm/Kbuild  |   1
-rw-r--r--  arch/ia64/include/asm/rwsem.h    | 172
-rw-r--r--  arch/powerpc/include/asm/Kbuild  |   1
-rw-r--r--  arch/s390/include/asm/Kbuild     |   1
-rw-r--r--  arch/sh/include/asm/Kbuild       |   1
-rw-r--r--  arch/sparc/include/asm/Kbuild    |   1
-rw-r--r--  arch/x86/include/asm/rwsem.h     | 237
-rw-r--r--  arch/x86/lib/Makefile            |   1
-rw-r--r--  arch/x86/lib/rwsem.S             | 156
-rw-r--r--  arch/x86/um/Makefile             |   4
-rw-r--r--  arch/xtensa/include/asm/Kbuild   |   1
-rw-r--r--  include/asm-generic/rwsem.h      | 140
-rw-r--r--  include/linux/rwsem.h            |   4
-rw-r--r--  kernel/locking/percpu-rwsem.c    |   2
-rw-r--r--  kernel/locking/rwsem.h           | 130
19 files changed, 134 insertions, 932 deletions
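
All of the fast paths removed below encode the same count layout: the low word holds the number of active lockers, and the high word is the negated number of writers plus waiting lockers. A minimal stand-alone sketch of that bias arithmetic, using the 64-bit constants that appear in the deleted headers (plain user-space C for illustration, not kernel API):

#include <stdio.h>

#define RWSEM_UNLOCKED_VALUE	0x0000000000000000L
#define RWSEM_ACTIVE_BIAS	0x0000000000000001L
#define RWSEM_ACTIVE_MASK	0x00000000ffffffffL
#define RWSEM_WAITING_BIAS	(-0x0000000100000000L)
#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
	long count = RWSEM_UNLOCKED_VALUE;

	count += RWSEM_ACTIVE_READ_BIAS;	/* one reader: 0x1, stays positive */
	printf("reader held: %#lx\n", (unsigned long)count);
	count -= RWSEM_ACTIVE_READ_BIAS;

	count += RWSEM_ACTIVE_WRITE_BIAS;	/* writer: 0xffffffff00000001, negative */
	printf("writer held: %#lx\n", (unsigned long)count);
	printf("active part: %#lx\n", (unsigned long)(count & RWSEM_ACTIVE_MASK));
	return 0;
}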
diff --git a/MAINTAINERS b/MAINTAINERS
index 43b36dbed48e..8876f5de7738 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9098,7 +9098,6 @@ F: arch/*/include/asm/spinlock*.h
 F:	include/linux/rwlock*.h
 F:	include/linux/mutex*.h
 F:	include/linux/rwsem*.h
-F:	arch/*/include/asm/rwsem.h
 F:	include/linux/seqlock.h
 F:	lib/locking*.[ch]
 F:	kernel/locking/
diff --git a/arch/alpha/include/asm/rwsem.h b/arch/alpha/include/asm/rwsem.h
deleted file mode 100644
index cf8fc8f9a2ed..000000000000
--- a/arch/alpha/include/asm/rwsem.h
+++ /dev/null
@@ -1,211 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ALPHA_RWSEM_H
-#define _ALPHA_RWSEM_H
-
-/*
- * Written by Ivan Kokshaysky <ink@jurassic.park.msu.ru>, 2001.
- * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
- */
-
-#ifndef _LINUX_RWSEM_H
-#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
-#endif
-
-#ifdef __KERNEL__
-
-#include <linux/compiler.h>
-
-#define RWSEM_UNLOCKED_VALUE		0x0000000000000000L
-#define RWSEM_ACTIVE_BIAS		0x0000000000000001L
-#define RWSEM_ACTIVE_MASK		0x00000000ffffffffL
-#define RWSEM_WAITING_BIAS		(-0x0000000100000000L)
-#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-static inline int ___down_read(struct rw_semaphore *sem)
-{
-	long oldcount;
-#ifndef	CONFIG_SMP
-	oldcount = sem->count.counter;
-	sem->count.counter += RWSEM_ACTIVE_READ_BIAS;
-#else
-	long temp;
-	__asm__ __volatile__(
-	"1:	ldq_l	%0,%1\n"
-	"	addq	%0,%3,%2\n"
-	"	stq_c	%2,%1\n"
-	"	beq	%2,2f\n"
-	"	mb\n"
-	".subsection 2\n"
-	"2:	br	1b\n"
-	".previous"
-	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
-	:"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
-#endif
-	return (oldcount < 0);
-}
-
-static inline void __down_read(struct rw_semaphore *sem)
-{
-	if (unlikely(___down_read(sem)))
-		rwsem_down_read_failed(sem);
-}
-
-static inline int __down_read_killable(struct rw_semaphore *sem)
-{
-	if (unlikely(___down_read(sem)))
-		if (IS_ERR(rwsem_down_read_failed_killable(sem)))
-			return -EINTR;
-
-	return 0;
-}
-
-/*
- * trylock for reading -- returns 1 if successful, 0 if contention
- */
-static inline int __down_read_trylock(struct rw_semaphore *sem)
-{
-	long old, new, res;
-
-	res = atomic_long_read(&sem->count);
-	do {
-		new = res + RWSEM_ACTIVE_READ_BIAS;
-		if (new <= 0)
-			break;
-		old = res;
-		res = atomic_long_cmpxchg(&sem->count, old, new);
-	} while (res != old);
-	return res >= 0 ? 1 : 0;
-}
-
-static inline long ___down_write(struct rw_semaphore *sem)
-{
-	long oldcount;
-#ifndef	CONFIG_SMP
-	oldcount = sem->count.counter;
-	sem->count.counter += RWSEM_ACTIVE_WRITE_BIAS;
-#else
-	long temp;
-	__asm__ __volatile__(
-	"1:	ldq_l	%0,%1\n"
-	"	addq	%0,%3,%2\n"
-	"	stq_c	%2,%1\n"
-	"	beq	%2,2f\n"
-	"	mb\n"
-	".subsection 2\n"
-	"2:	br	1b\n"
-	".previous"
-	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
-	:"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
-#endif
-	return oldcount;
-}
-
-static inline void __down_write(struct rw_semaphore *sem)
-{
-	if (unlikely(___down_write(sem)))
-		rwsem_down_write_failed(sem);
-}
-
-static inline int __down_write_killable(struct rw_semaphore *sem)
-{
-	if (unlikely(___down_write(sem))) {
-		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
-			return -EINTR;
-	}
-
-	return 0;
-}
-
-/*
- * trylock for writing -- returns 1 if successful, 0 if contention
- */
-static inline int __down_write_trylock(struct rw_semaphore *sem)
-{
-	long ret = atomic_long_cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
-			   RWSEM_ACTIVE_WRITE_BIAS);
-	if (ret == RWSEM_UNLOCKED_VALUE)
-		return 1;
-	return 0;
-}
-
-static inline void __up_read(struct rw_semaphore *sem)
-{
-	long oldcount;
-#ifndef	CONFIG_SMP
-	oldcount = sem->count.counter;
-	sem->count.counter -= RWSEM_ACTIVE_READ_BIAS;
-#else
-	long temp;
-	__asm__ __volatile__(
-	"	mb\n"
-	"1:	ldq_l	%0,%1\n"
-	"	subq	%0,%3,%2\n"
-	"	stq_c	%2,%1\n"
-	"	beq	%2,2f\n"
-	".subsection 2\n"
-	"2:	br	1b\n"
-	".previous"
-	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
-	:"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
-#endif
-	if (unlikely(oldcount < 0))
-		if ((int)oldcount - RWSEM_ACTIVE_READ_BIAS == 0)
-			rwsem_wake(sem);
-}
-
-static inline void __up_write(struct rw_semaphore *sem)
-{
-	long count;
-#ifndef	CONFIG_SMP
-	sem->count.counter -= RWSEM_ACTIVE_WRITE_BIAS;
-	count = sem->count.counter;
-#else
-	long temp;
-	__asm__ __volatile__(
-	"	mb\n"
-	"1:	ldq_l	%0,%1\n"
-	"	subq	%0,%3,%2\n"
-	"	stq_c	%2,%1\n"
-	"	beq	%2,2f\n"
-	"	subq	%0,%3,%0\n"
-	".subsection 2\n"
-	"2:	br	1b\n"
-	".previous"
-	:"=&r" (count), "=m" (sem->count), "=&r" (temp)
-	:"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
-#endif
-	if (unlikely(count))
-		if ((int)count == 0)
-			rwsem_wake(sem);
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
-	long oldcount;
-#ifndef	CONFIG_SMP
-	oldcount = sem->count.counter;
-	sem->count.counter -= RWSEM_WAITING_BIAS;
-#else
-	long temp;
-	__asm__ __volatile__(
-	"1:	ldq_l	%0,%1\n"
-	"	addq	%0,%3,%2\n"
-	"	stq_c	%2,%1\n"
-	"	beq	%2,2f\n"
-	"	mb\n"
-	".subsection 2\n"
-	"2:	br	1b\n"
-	".previous"
-	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
-	:"Ir" (-RWSEM_WAITING_BIAS), "m" (sem->count) : "memory");
-#endif
-	if (unlikely(oldcount < 0))
-		rwsem_downgrade_wake(sem);
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ALPHA_RWSEM_H */
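
The Alpha fastpath above spins on ldq_l/stq_c (load-locked/store-conditional) until the store lands, then checks the sign of the old count. A rough C11 equivalent of ___down_read(), sketched with compiler atomics instead of Alpha assembly; demo_down_read() is a hypothetical stand-in, not kernel code:

#include <stdatomic.h>

#define RWSEM_ACTIVE_READ_BIAS	1L

/* Returns nonzero on contention, like ___down_read() above. */
static int demo_down_read(_Atomic long *count)
{
	/* fetch-add with acquire ordering, like the ldq_l/addq/stq_c + mb loop */
	long old = atomic_fetch_add_explicit(count, RWSEM_ACTIVE_READ_BIAS,
					     memory_order_acquire);
	return old < 0;	/* negative old count => writer active or waiting */
}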
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index a8a4eb7f6dae..8fb51b7bf1d5 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -12,7 +12,6 @@ generic-y += mm-arch-hooks.h
 generic-y += msi.h
 generic-y += parport.h
 generic-y += preempt.h
-generic-y += rwsem.h
 generic-y += seccomp.h
 generic-y += segment.h
 generic-y += serial.h
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 1e17ea5c372b..60a933b07001 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -16,7 +16,6 @@ generic-y += mm-arch-hooks.h
 generic-y += msi.h
 generic-y += qrwlock.h
 generic-y += qspinlock.h
-generic-y += rwsem.h
 generic-y += segment.h
 generic-y += serial.h
 generic-y += set_memory.h
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild
index d046e8ccdf78..3ff5f297acda 100644
--- a/arch/hexagon/include/asm/Kbuild
+++ b/arch/hexagon/include/asm/Kbuild
@@ -27,7 +27,6 @@ generic-y += mm-arch-hooks.h
 generic-y += pci.h
 generic-y += percpu.h
 generic-y += preempt.h
-generic-y += rwsem.h
 generic-y += sections.h
 generic-y += segment.h
 generic-y += serial.h
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
deleted file mode 100644
index 917910607e0e..000000000000
--- a/arch/ia64/include/asm/rwsem.h
+++ /dev/null
@@ -1,172 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * R/W semaphores for ia64
- *
- * Copyright (C) 2003 Ken Chen <kenneth.w.chen@intel.com>
- * Copyright (C) 2003 Asit Mallick <asit.k.mallick@intel.com>
- * Copyright (C) 2005 Christoph Lameter <cl@linux.com>
- *
- * Based on asm-i386/rwsem.h and other architecture implementation.
- *
- * The MSW of the count is the negated number of active writers and
- * waiting lockers, and the LSW is the total number of active locks.
- *
- * The lock count is initialized to 0 (no active and no waiting lockers).
- *
- * When a writer subtracts WRITE_BIAS, it'll get 0xffffffff00000001 for
- * the case of an uncontended lock. Readers increment by 1 and see a positive
- * value when uncontended, negative if there are writers (and maybe) readers
- * waiting (in which case it goes to sleep).
- */
-
-#ifndef _ASM_IA64_RWSEM_H
-#define _ASM_IA64_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
-#endif
-
-#include <asm/intrinsics.h>
-
-#define RWSEM_UNLOCKED_VALUE		__IA64_UL_CONST(0x0000000000000000)
-#define RWSEM_ACTIVE_BIAS		(1L)
-#define RWSEM_ACTIVE_MASK		(0xffffffffL)
-#define RWSEM_WAITING_BIAS		(-0x100000000L)
-#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-/*
- * lock for reading
- */
-static inline int
-___down_read (struct rw_semaphore *sem)
-{
-	long result = ia64_fetchadd8_acq((unsigned long *)&sem->count.counter, 1);
-
-	return (result < 0);
-}
-
-static inline void
-__down_read (struct rw_semaphore *sem)
-{
-	if (___down_read(sem))
-		rwsem_down_read_failed(sem);
-}
-
-static inline int
-__down_read_killable (struct rw_semaphore *sem)
-{
-	if (___down_read(sem))
-		if (IS_ERR(rwsem_down_read_failed_killable(sem)))
-			return -EINTR;
-
-	return 0;
-}
-
-/*
- * lock for writing
- */
-static inline long
-___down_write (struct rw_semaphore *sem)
-{
-	long old, new;
-
-	do {
-		old = atomic_long_read(&sem->count);
-		new = old + RWSEM_ACTIVE_WRITE_BIAS;
-	} while (atomic_long_cmpxchg_acquire(&sem->count, old, new) != old);
-
-	return old;
-}
-
-static inline void
-__down_write (struct rw_semaphore *sem)
-{
-	if (___down_write(sem))
-		rwsem_down_write_failed(sem);
-}
-
-static inline int
-__down_write_killable (struct rw_semaphore *sem)
-{
-	if (___down_write(sem)) {
-		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
-			return -EINTR;
-	}
-
-	return 0;
-}
-
-/*
- * unlock after reading
- */
-static inline void
-__up_read (struct rw_semaphore *sem)
-{
-	long result = ia64_fetchadd8_rel((unsigned long *)&sem->count.counter, -1);
-
-	if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
-		rwsem_wake(sem);
-}
-
-/*
- * unlock after writing
- */
-static inline void
-__up_write (struct rw_semaphore *sem)
-{
-	long old, new;
-
-	do {
-		old = atomic_long_read(&sem->count);
-		new = old - RWSEM_ACTIVE_WRITE_BIAS;
-	} while (atomic_long_cmpxchg_release(&sem->count, old, new) != old);
-
-	if (new < 0 && (new & RWSEM_ACTIVE_MASK) == 0)
-		rwsem_wake(sem);
-}
-
-/*
- * trylock for reading -- returns 1 if successful, 0 if contention
- */
-static inline int
-__down_read_trylock (struct rw_semaphore *sem)
-{
-	long tmp;
-	while ((tmp = atomic_long_read(&sem->count)) >= 0) {
-		if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp, tmp+1)) {
-			return 1;
-		}
-	}
-	return 0;
-}
-
-/*
- * trylock for writing -- returns 1 if successful, 0 if contention
- */
-static inline int
-__down_write_trylock (struct rw_semaphore *sem)
-{
-	long tmp = atomic_long_cmpxchg_acquire(&sem->count,
-			RWSEM_UNLOCKED_VALUE, RWSEM_ACTIVE_WRITE_BIAS);
-	return tmp == RWSEM_UNLOCKED_VALUE;
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void
-__downgrade_write (struct rw_semaphore *sem)
-{
-	long old, new;
-
-	do {
-		old = atomic_long_read(&sem->count);
-		new = old - RWSEM_WAITING_BIAS;
-	} while (atomic_long_cmpxchg_release(&sem->count, old, new) != old);
-
-	if (old < 0)
-		rwsem_downgrade_wake(sem);
-}
-
-#endif /* _ASM_IA64_RWSEM_H */
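
The ia64 version takes the read path with a single fetchadd but builds the write path from a cmpxchg retry loop. A hedged user-space sketch of that loop with C11 atomics; demo_down_write() is illustrative only:

#include <stdatomic.h>

#define RWSEM_ACTIVE_BIAS	1L
#define RWSEM_WAITING_BIAS	(-0x100000000L)
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

/* Returns the old count, as ___down_write() above does. */
static long demo_down_write(_Atomic long *count)
{
	long old = atomic_load_explicit(count, memory_order_relaxed);

	/* retry until the compare-and-swap lands; on failure the CAS
	 * reloads 'old' with the current value, mirroring the ia64 loop */
	while (!atomic_compare_exchange_weak_explicit(count, &old,
			old + RWSEM_ACTIVE_WRITE_BIAS,
			memory_order_acquire, memory_order_relaxed))
		;
	return old;	/* nonzero => contended, caller takes the slow path */
}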
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index a0c132bedfae..36bda391e549 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -8,6 +8,5 @@ generic-y += irq_regs.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += preempt.h
-generic-y += rwsem.h
 generic-y += vtime.h
 generic-y += msi.h
diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
index 12d77cb11fe5..d5fadefea33c 100644
--- a/arch/s390/include/asm/Kbuild
+++ b/arch/s390/include/asm/Kbuild
@@ -20,7 +20,6 @@ generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
-generic-y += rwsem.h
 generic-y += trace_clock.h
 generic-y += unaligned.h
 generic-y += word-at-a-time.h
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index 7bf2cb680d32..73fff39a0122 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -17,7 +17,6 @@ generic-y += mm-arch-hooks.h
 generic-y += parport.h
 generic-y += percpu.h
 generic-y += preempt.h
-generic-y += rwsem.h
 generic-y += serial.h
 generic-y += sizes.h
 generic-y += trace_clock.h
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index a22cfd5c0ee8..2ca3200d3616 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -18,7 +18,6 @@ generic-y += mm-arch-hooks.h
 generic-y += module.h
 generic-y += msi.h
 generic-y += preempt.h
-generic-y += rwsem.h
 generic-y += serial.h
 generic-y += trace_clock.h
 generic-y += word-at-a-time.h
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
deleted file mode 100644
index 4c25cf6caefa..000000000000
--- a/arch/x86/include/asm/rwsem.h
+++ /dev/null
@@ -1,237 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for i486+
- *
- * Written by David Howells (dhowells@redhat.com).
- *
- * Derived from asm-x86/semaphore.h
- *
- *
- * The MSW of the count is the negated number of active writers and waiting
- * lockers, and the LSW is the total number of active locks
- *
- * The lock count is initialized to 0 (no active and no waiting lockers).
- *
- * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
- * uncontended lock. This can be determined because XADD returns the old value.
- * Readers increment by 1 and see a positive value when uncontended, negative
- * if there are writers (and maybe) readers waiting (in which case it goes to
- * sleep).
- *
- * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
- * be extended to 65534 by manually checking the whole MSW rather than relying
- * on the S flag.
- *
- * The value of ACTIVE_BIAS supports up to 65535 active processes.
- *
- * This should be totally fair - if anything is waiting, a process that wants a
- * lock will go to the back of the queue. When the currently active lock is
- * released, if there's a writer at the front of the queue, then that and only
- * that will be woken up; if there's a bunch of consecutive readers at the
- * front, then they'll all be woken up, but no other readers will be.
- */
-
-#ifndef _ASM_X86_RWSEM_H
-#define _ASM_X86_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
-#endif
-
-#ifdef __KERNEL__
-#include <asm/asm.h>
-
-/*
- * The bias values and the counter type limits the number of
- * potential readers/writers to 32767 for 32 bits and 2147483647
- * for 64 bits.
- */
-
-#ifdef CONFIG_X86_64
-# define RWSEM_ACTIVE_MASK		0xffffffffL
-#else
-# define RWSEM_ACTIVE_MASK		0x0000ffffL
-#endif
-
-#define RWSEM_UNLOCKED_VALUE		0x00000000L
-#define RWSEM_ACTIVE_BIAS		0x00000001L
-#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
-#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-/*
- * lock for reading
- */
-#define ____down_read(sem, slow_path)				\
-({								\
-	struct rw_semaphore* ret;				\
-	asm volatile("# beginning down_read\n\t"		\
-		     LOCK_PREFIX _ASM_INC "(%[sem])\n\t"	\
-		     /* adds 0x00000001 */			\
-		     " jns 1f\n"				\
-		     " call " slow_path "\n"			\
-		     "1:\n\t"					\
-		     "# ending down_read\n\t"			\
-		     : "+m" (sem->count), "=a" (ret),		\
-		       ASM_CALL_CONSTRAINT			\
-		     : [sem] "a" (sem)				\
-		     : "memory", "cc");				\
-	ret;							\
-})
-
-static inline void __down_read(struct rw_semaphore *sem)
-{
-	____down_read(sem, "call_rwsem_down_read_failed");
-}
-
-static inline int __down_read_killable(struct rw_semaphore *sem)
-{
-	if (IS_ERR(____down_read(sem, "call_rwsem_down_read_failed_killable")))
-		return -EINTR;
-	return 0;
-}
-
-/*
- * trylock for reading -- returns 1 if successful, 0 if contention
- */
-static inline bool __down_read_trylock(struct rw_semaphore *sem)
-{
-	long result, tmp;
-	asm volatile("# beginning __down_read_trylock\n\t"
-		     " mov %[count],%[result]\n\t"
-		     "1:\n\t"
-		     " mov %[result],%[tmp]\n\t"
-		     " add %[inc],%[tmp]\n\t"
-		     " jle 2f\n\t"
-		     LOCK_PREFIX " cmpxchg %[tmp],%[count]\n\t"
-		     " jnz 1b\n\t"
-		     "2:\n\t"
-		     "# ending __down_read_trylock\n\t"
-		     : [count] "+m" (sem->count), [result] "=&a" (result),
-		       [tmp] "=&r" (tmp)
-		     : [inc] "i" (RWSEM_ACTIVE_READ_BIAS)
-		     : "memory", "cc");
-	return result >= 0;
-}
-
-/*
- * lock for writing
- */
-#define ____down_write(sem, slow_path)				\
-({								\
-	long tmp;						\
-	struct rw_semaphore* ret;				\
-								\
-	asm volatile("# beginning down_write\n\t"		\
-		     LOCK_PREFIX " xadd %[tmp],(%[sem])\n\t"	\
-		     /* adds 0xffff0001, returns the old value */ \
-		     " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \
-		     /* was the active mask 0 before? */	\
-		     " jz 1f\n"					\
-		     " call " slow_path "\n"			\
-		     "1:\n"					\
-		     "# ending down_write"			\
-		     : "+m" (sem->count), [tmp] "=d" (tmp),	\
-		       "=a" (ret), ASM_CALL_CONSTRAINT		\
-		     : [sem] "a" (sem), "[tmp]" (RWSEM_ACTIVE_WRITE_BIAS) \
-		     : "memory", "cc");				\
-	ret;							\
-})
-
-static inline void __down_write(struct rw_semaphore *sem)
-{
-	____down_write(sem, "call_rwsem_down_write_failed");
-}
-
-static inline int __down_write_killable(struct rw_semaphore *sem)
-{
-	if (IS_ERR(____down_write(sem, "call_rwsem_down_write_failed_killable")))
-		return -EINTR;
-
-	return 0;
-}
-
-/*
- * trylock for writing -- returns 1 if successful, 0 if contention
- */
-static inline bool __down_write_trylock(struct rw_semaphore *sem)
-{
-	bool result;
-	long tmp0, tmp1;
-	asm volatile("# beginning __down_write_trylock\n\t"
-		     " mov %[count],%[tmp0]\n\t"
-		     "1:\n\t"
-		     " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
-		     /* was the active mask 0 before? */
-		     " jnz 2f\n\t"
-		     " mov %[tmp0],%[tmp1]\n\t"
-		     " add %[inc],%[tmp1]\n\t"
-		     LOCK_PREFIX " cmpxchg %[tmp1],%[count]\n\t"
-		     " jnz 1b\n\t"
-		     "2:\n\t"
-		     CC_SET(e)
-		     "# ending __down_write_trylock\n\t"
-		     : [count] "+m" (sem->count), [tmp0] "=&a" (tmp0),
-		       [tmp1] "=&r" (tmp1), CC_OUT(e) (result)
-		     : [inc] "er" (RWSEM_ACTIVE_WRITE_BIAS)
-		     : "memory");
-	return result;
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
-	long tmp;
-	asm volatile("# beginning __up_read\n\t"
-		     LOCK_PREFIX " xadd %[tmp],(%[sem])\n\t"
-		     /* subtracts 1, returns the old value */
-		     " jns 1f\n\t"
-		     " call call_rwsem_wake\n" /* expects old value in %edx */
-		     "1:\n"
-		     "# ending __up_read\n"
-		     : "+m" (sem->count), [tmp] "=d" (tmp)
-		     : [sem] "a" (sem), "[tmp]" (-RWSEM_ACTIVE_READ_BIAS)
-		     : "memory", "cc");
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
-	long tmp;
-	asm volatile("# beginning __up_write\n\t"
-		     LOCK_PREFIX " xadd %[tmp],(%[sem])\n\t"
-		     /* subtracts 0xffff0001, returns the old value */
-		     " jns 1f\n\t"
-		     " call call_rwsem_wake\n" /* expects old value in %edx */
-		     "1:\n\t"
-		     "# ending __up_write\n"
-		     : "+m" (sem->count), [tmp] "=d" (tmp)
-		     : [sem] "a" (sem), "[tmp]" (-RWSEM_ACTIVE_WRITE_BIAS)
-		     : "memory", "cc");
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
-	asm volatile("# beginning __downgrade_write\n\t"
-		     LOCK_PREFIX _ASM_ADD "%[inc],(%[sem])\n\t"
-		     /*
-		      * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
-		      *     0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
-		      */
-		     " jns 1f\n\t"
-		     " call call_rwsem_downgrade_wake\n"
-		     "1:\n\t"
-		     "# ending __downgrade_write\n"
-		     : "+m" (sem->count)
-		     : [sem] "a" (sem), [inc] "er" (-RWSEM_WAITING_BIAS)
-		     : "memory", "cc");
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_X86_RWSEM_H */
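
The x86 write fastpath adds RWSEM_ACTIVE_WRITE_BIAS with LOCK XADD and then tests whether the low (active) part of the old value was zero. The same logic in portable C, assuming the 64-bit constants from the deleted header; demo_down_write_fast() is a hypothetical illustration, not the kernel's implementation:

#define RWSEM_ACTIVE_MASK	0xffffffffL	/* the CONFIG_X86_64 value */
#define RWSEM_WAITING_BIAS	(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + 1L)

/* Returns nonzero when the caller must take the slow path. */
static int demo_down_write_fast(long *count)
{
	/* LOCK XADD: add the bias atomically, get the old value back */
	long old = __atomic_fetch_add(count, RWSEM_ACTIVE_WRITE_BIAS,
				      __ATOMIC_ACQUIRE);
	/* the "test %w1,%w1"/"test %k1,%k1" above checks exactly this:
	 * was the active part of the old count zero? */
	return (old & RWSEM_ACTIVE_MASK) != 0;
}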
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 140e61843a07..986652064b15 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -23,7 +23,6 @@ obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
 lib-y := delay.o misc.o cmdline.o cpu.o
 lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
 lib-y += memcpy_$(BITS).o
-lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o
 lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
 lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
deleted file mode 100644
index dc2ab6ea6768..000000000000
--- a/arch/x86/lib/rwsem.S
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * x86 semaphore implementation.
- *
- * (C) Copyright 1999 Linus Torvalds
- *
- * Portions Copyright 1999 Red Hat, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
- */
-
-#include <linux/linkage.h>
-#include <asm/alternative-asm.h>
-#include <asm/frame.h>
-
-#define __ASM_HALF_REG(reg)	__ASM_SEL(reg, e##reg)
-#define __ASM_HALF_SIZE(inst)	__ASM_SEL(inst##w, inst##l)
-
-#ifdef CONFIG_X86_32
-
-/*
- * The semaphore operations have a special calling sequence that
- * allow us to do a simpler in-line version of them. These routines
- * need to convert that sequence back into the C sequence when
- * there is contention on the semaphore.
- *
- * %eax contains the semaphore pointer on entry. Save the C-clobbered
- * registers (%eax, %edx and %ecx) except %eax which is either a return
- * value or just gets clobbered. Same is true for %edx so make sure GCC
- * reloads it after the slow path, by making it hold a temporary, for
- * example see ____down_write().
- */
-
-#define save_common_regs \
-	pushl %ecx
-
-#define restore_common_regs \
-	popl %ecx
-
-	/* Avoid uglifying the argument copying x86-64 needs to do. */
-	.macro movq src, dst
-	.endm
-
-#else
-
-/*
- * x86-64 rwsem wrappers
- *
- * This interfaces the inline asm code to the slow-path
- * C routines. We need to save the call-clobbered regs
- * that the asm does not mark as clobbered, and move the
- * argument from %rax to %rdi.
- *
- * NOTE! We don't need to save %rax, because the functions
- * will always return the semaphore pointer in %rax (which
- * is also the input argument to these helpers)
- *
- * The following can clobber %rdx because the asm clobbers it:
- *	call_rwsem_down_write_failed
- *	call_rwsem_wake
- * but %rdi, %rsi, %rcx, %r8-r11 always need saving.
- */
-
-#define save_common_regs \
-	pushq %rdi; \
-	pushq %rsi; \
-	pushq %rcx; \
-	pushq %r8;  \
-	pushq %r9;  \
-	pushq %r10; \
-	pushq %r11
-
-#define restore_common_regs \
-	popq %r11; \
-	popq %r10; \
-	popq %r9; \
-	popq %r8; \
-	popq %rcx; \
-	popq %rsi; \
-	popq %rdi
-
-#endif
-
-/* Fix up special calling conventions */
-ENTRY(call_rwsem_down_read_failed)
-	FRAME_BEGIN
-	save_common_regs
-	__ASM_SIZE(push,) %__ASM_REG(dx)
-	movq %rax,%rdi
-	call rwsem_down_read_failed
-	__ASM_SIZE(pop,) %__ASM_REG(dx)
-	restore_common_regs
-	FRAME_END
-	ret
-ENDPROC(call_rwsem_down_read_failed)
-
-ENTRY(call_rwsem_down_read_failed_killable)
-	FRAME_BEGIN
-	save_common_regs
-	__ASM_SIZE(push,) %__ASM_REG(dx)
-	movq %rax,%rdi
-	call rwsem_down_read_failed_killable
-	__ASM_SIZE(pop,) %__ASM_REG(dx)
-	restore_common_regs
-	FRAME_END
-	ret
-ENDPROC(call_rwsem_down_read_failed_killable)
-
-ENTRY(call_rwsem_down_write_failed)
-	FRAME_BEGIN
-	save_common_regs
-	movq %rax,%rdi
-	call rwsem_down_write_failed
-	restore_common_regs
-	FRAME_END
-	ret
-ENDPROC(call_rwsem_down_write_failed)
-
-ENTRY(call_rwsem_down_write_failed_killable)
-	FRAME_BEGIN
-	save_common_regs
-	movq %rax,%rdi
-	call rwsem_down_write_failed_killable
-	restore_common_regs
-	FRAME_END
-	ret
-ENDPROC(call_rwsem_down_write_failed_killable)
-
-ENTRY(call_rwsem_wake)
-	FRAME_BEGIN
-	/* do nothing if still outstanding active readers */
-	__ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx)
-	jnz 1f
-	save_common_regs
-	movq %rax,%rdi
-	call rwsem_wake
-	restore_common_regs
-1:	FRAME_END
-	ret
-ENDPROC(call_rwsem_wake)
-
-ENTRY(call_rwsem_downgrade_wake)
-	FRAME_BEGIN
-	save_common_regs
-	__ASM_SIZE(push,) %__ASM_REG(dx)
-	movq %rax,%rdi
-	call rwsem_downgrade_wake
-	__ASM_SIZE(pop,) %__ASM_REG(dx)
-	restore_common_regs
-	FRAME_END
-	ret
-ENDPROC(call_rwsem_downgrade_wake)
diff --git a/arch/x86/um/Makefile b/arch/x86/um/Makefile
index 2d686ae54681..33c51c064c77 100644
--- a/arch/x86/um/Makefile
+++ b/arch/x86/um/Makefile
@@ -21,14 +21,12 @@ obj-y += checksum_32.o syscalls_32.o
 obj-$(CONFIG_ELF_CORE) += elfcore.o
 
 subarch-y = ../lib/string_32.o ../lib/atomic64_32.o ../lib/atomic64_cx8_32.o
-subarch-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += ../lib/rwsem.o
 
 else
 
 obj-y += syscalls_64.o vdso/
 
-subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o ../entry/thunk_64.o \
-	../lib/rwsem.o
+subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o ../entry/thunk_64.o
 
 endif
 
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 3843198e03d4..4148090cafb0 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -25,7 +25,6 @@ generic-y += percpu.h
 generic-y += preempt.h
 generic-y += qrwlock.h
 generic-y += qspinlock.h
-generic-y += rwsem.h
 generic-y += sections.h
 generic-y += socket.h
 generic-y += topology.h
diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h
deleted file mode 100644
index 93e67a055a4d..000000000000
--- a/include/asm-generic/rwsem.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_RWSEM_H
-#define _ASM_GENERIC_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
-#endif
-
-#ifdef __KERNEL__
-
-/*
- * R/W semaphores originally for PPC using the stuff in lib/rwsem.c.
- * Adapted largely from include/asm-i386/rwsem.h
- * by Paul Mackerras <paulus@samba.org>.
- */
-
-/*
- * the semaphore definition
- */
-#ifdef CONFIG_64BIT
-# define RWSEM_ACTIVE_MASK		0xffffffffL
-#else
-# define RWSEM_ACTIVE_MASK		0x0000ffffL
-#endif
-
-#define RWSEM_UNLOCKED_VALUE		0x00000000L
-#define RWSEM_ACTIVE_BIAS		0x00000001L
-#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
-#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-/*
- * lock for reading
- */
-static inline void __down_read(struct rw_semaphore *sem)
-{
-	if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0))
-		rwsem_down_read_failed(sem);
-}
-
-static inline int __down_read_killable(struct rw_semaphore *sem)
-{
-	if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
-		if (IS_ERR(rwsem_down_read_failed_killable(sem)))
-			return -EINTR;
-	}
-
-	return 0;
-}
-
-static inline int __down_read_trylock(struct rw_semaphore *sem)
-{
-	long tmp;
-
-	while ((tmp = atomic_long_read(&sem->count)) >= 0) {
-		if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp,
-				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
-			return 1;
-		}
-	}
-	return 0;
-}
-
-/*
- * lock for writing
- */
-static inline void __down_write(struct rw_semaphore *sem)
-{
-	long tmp;
-
-	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
-					     &sem->count);
-	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
-		rwsem_down_write_failed(sem);
-}
-
-static inline int __down_write_killable(struct rw_semaphore *sem)
-{
-	long tmp;
-
-	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
-					     &sem->count);
-	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
-		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
-			return -EINTR;
-	return 0;
-}
-
-static inline int __down_write_trylock(struct rw_semaphore *sem)
-{
-	long tmp;
-
-	tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
-		      RWSEM_ACTIVE_WRITE_BIAS);
-	return tmp == RWSEM_UNLOCKED_VALUE;
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
-	long tmp;
-
-	tmp = atomic_long_dec_return_release(&sem->count);
-	if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
-		rwsem_wake(sem);
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
-	if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
-						    &sem->count) < 0))
-		rwsem_wake(sem);
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
-	long tmp;
-
-	/*
-	 * When downgrading from exclusive to shared ownership,
-	 * anything inside the write-locked region cannot leak
-	 * into the read side. In contrast, anything in the
-	 * read-locked region is ok to be re-ordered into the
-	 * write side. As such, rely on RELEASE semantics.
-	 */
-	tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count);
-	if (tmp < 0)
-		rwsem_downgrade_wake(sem);
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_GENERIC_RWSEM_H */
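
Note the wake condition in __up_read() above: waiters are only woken when the decrement leaves a negative count with no active lockers remaining. Isolated as a tiny predicate (a hypothetical helper in user-space C, for illustration):

#define RWSEM_ACTIVE_MASK	0xffffffffL	/* the CONFIG_64BIT value */

static int demo_need_wake_after_up_read(long new_count)
{
	/* negative => waiters queued; zero active part => we were last out */
	return new_count < -1 && (new_count & RWSEM_ACTIVE_MASK) == 0;
}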
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 67dbb57508b1..6e56006b2cb6 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -57,15 +57,13 @@ extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore
 extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
 extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
 
-/* Include the arch specific part */
-#include <asm/rwsem.h>
-
 /* In all implementations count != 0 means locked */
 static inline int rwsem_is_locked(struct rw_semaphore *sem)
 {
 	return atomic_long_read(&sem->count) != 0;
 }
 
+#define RWSEM_UNLOCKED_VALUE		0L
 #define __RWSEM_INIT_COUNT(name)	.count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
 #endif
 
diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c
index 883cf1b92d90..f17dad99eec8 100644
--- a/kernel/locking/percpu-rwsem.c
+++ b/kernel/locking/percpu-rwsem.c
@@ -7,6 +7,8 @@
 #include <linux/sched.h>
 #include <linux/errno.h>
 
+#include "rwsem.h"
+
 int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
 			const char *name, struct lock_class_key *rwsem_key)
 {
diff --git a/kernel/locking/rwsem.h b/kernel/locking/rwsem.h
index bad2bca0268b..067e265fa5c1 100644
--- a/kernel/locking/rwsem.h
+++ b/kernel/locking/rwsem.h
@@ -32,6 +32,26 @@
 # define DEBUG_RWSEMS_WARN_ON(c)
 #endif
 
+/*
+ * R/W semaphores originally for PPC using the stuff in lib/rwsem.c.
+ * Adapted largely from include/asm-i386/rwsem.h
+ * by Paul Mackerras <paulus@samba.org>.
+ */
+
+/*
+ * the semaphore definition
+ */
+#ifdef CONFIG_64BIT
+# define RWSEM_ACTIVE_MASK		0xffffffffL
+#else
+# define RWSEM_ACTIVE_MASK		0x0000ffffL
+#endif
+
+#define RWSEM_ACTIVE_BIAS		0x00000001L
+#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
+#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
+#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
+
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 /*
  * All writes to owner are protected by WRITE_ONCE() to make sure that
@@ -132,3 +152,113 @@ static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
 {
 }
 #endif
+
+#ifdef CONFIG_RWSEM_XCHGADD_ALGORITHM
+/*
+ * lock for reading
+ */
+static inline void __down_read(struct rw_semaphore *sem)
+{
+	if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0))
+		rwsem_down_read_failed(sem);
+}
+
+static inline int __down_read_killable(struct rw_semaphore *sem)
+{
+	if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
+		if (IS_ERR(rwsem_down_read_failed_killable(sem)))
+			return -EINTR;
+	}
+
+	return 0;
+}
+
+static inline int __down_read_trylock(struct rw_semaphore *sem)
+{
+	long tmp;
+
+	while ((tmp = atomic_long_read(&sem->count)) >= 0) {
+		if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp,
+				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/*
+ * lock for writing
+ */
+static inline void __down_write(struct rw_semaphore *sem)
+{
+	long tmp;
+
+	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
+					     &sem->count);
+	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
+		rwsem_down_write_failed(sem);
+}
+
+static inline int __down_write_killable(struct rw_semaphore *sem)
+{
+	long tmp;
+
+	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
+					     &sem->count);
+	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
+		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
+			return -EINTR;
+	return 0;
+}
+
+static inline int __down_write_trylock(struct rw_semaphore *sem)
+{
+	long tmp;
+
+	tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
+		      RWSEM_ACTIVE_WRITE_BIAS);
+	return tmp == RWSEM_UNLOCKED_VALUE;
+}
+
+/*
+ * unlock after reading
+ */
+static inline void __up_read(struct rw_semaphore *sem)
+{
+	long tmp;
+
+	tmp = atomic_long_dec_return_release(&sem->count);
+	if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
+		rwsem_wake(sem);
+}
+
+/*
+ * unlock after writing
+ */
+static inline void __up_write(struct rw_semaphore *sem)
+{
+	if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
+						    &sem->count) < 0))
+		rwsem_wake(sem);
+}
+
+/*
+ * downgrade write lock to read lock
+ */
+static inline void __downgrade_write(struct rw_semaphore *sem)
+{
+	long tmp;
+
+	/*
+	 * When downgrading from exclusive to shared ownership,
+	 * anything inside the write-locked region cannot leak
+	 * into the read side. In contrast, anything in the
+	 * read-locked region is ok to be re-ordered into the
+	 * write side. As such, rely on RELEASE semantics.
+	 */
+	tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count);
+	if (tmp < 0)
+		rwsem_downgrade_wake(sem);
+}
+
+#endif /* CONFIG_RWSEM_XCHGADD_ALGORITHM */
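
As a closing sanity check, the count transitions implemented by the code above can be traced end to end in a stand-alone program (assumes a 64-bit long; illustrative only, not kernel code):

#include <assert.h>
#include <stdio.h>

#define RWSEM_ACTIVE_MASK	0xffffffffL
#define RWSEM_WAITING_BIAS	(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS	1L
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_READ_BIAS)

int main(void)
{
	long count = 0;				/* RWSEM_UNLOCKED_VALUE */

	count += RWSEM_ACTIVE_WRITE_BIAS;	/* __down_write() fastpath */
	assert(count == RWSEM_ACTIVE_WRITE_BIAS);

	count -= RWSEM_WAITING_BIAS;		/* __downgrade_write() */
	assert(count == RWSEM_ACTIVE_READ_BIAS);	/* now held by one reader */

	count -= RWSEM_ACTIVE_READ_BIAS;	/* __up_read() */
	assert(count == 0);			/* fully unlocked again */
	printf("count transitions check out\n");
	return 0;
}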