author		Peter Zijlstra <peterz@infradead.org>	2013-10-31 13:19:28 -0400
committer	Ingo Molnar <mingo@kernel.org>	2013-11-06 03:24:18 -0500
commit		ed428bfc3caaa4b1e6cd15ea12c90c30291903f0
tree		7e69731ab4951cb9473e96b2fd4b743321c6d770 /lib
parent		1696a8bee390929fed05c6297164816ae2ced280
locking: Move the rwsem code to kernel/locking/
Notably: changed lib/rwsem* targets from lib- to obj-, no idea about the
ramifications of that.

Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-g0kynfh5feriwc6p3h6kpbw6@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'lib')
-rw-r--r--	lib/Makefile		  2
-rw-r--r--	lib/rwsem-spinlock.c	296
-rw-r--r--	lib/rwsem.c		293
3 files changed, 0 insertions(+), 591 deletions(-)
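
Both removed files implement the reader/writer semaphore API declared in <linux/rwsem.h>; only the one matching the architecture's CONFIG_RWSEM_GENERIC_SPINLOCK or CONFIG_RWSEM_XCHGADD_ALGORITHM selection was built, which is what the two Makefile lines deleted below controlled. As a quick orientation (not part of this patch): the entry points init_rwsem()/DECLARE_RWSEM(), down_read()/up_read() and down_write()/up_write() are the real kernel API, while the surrounding module-local names (cfg_rwsem, shared_cfg, cfg_*) are made up for illustration:

#include <linux/rwsem.h>

/* Hypothetical data protected by an rwsem (illustration only). */
static struct {
	int threshold;
	int enabled;
} shared_cfg;

static DECLARE_RWSEM(cfg_rwsem);	/* statically initialised rwsem */

/* Readers may run concurrently with each other. */
static int cfg_read_threshold(void)
{
	int val;

	down_read(&cfg_rwsem);
	val = shared_cfg.threshold;
	up_read(&cfg_rwsem);
	return val;
}

/* Writers get exclusive access, excluding readers and other writers. */
static void cfg_update(int threshold, int enabled)
{
	down_write(&cfg_rwsem);
	shared_cfg.threshold = threshold;
	shared_cfg.enabled = enabled;
	up_write(&cfg_rwsem);
}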
diff --git a/lib/Makefile b/lib/Makefile
index bee27e1d3431..ca8cadc8fe4d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -42,8 +42,6 @@ obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o
 obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
 obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
 obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
-lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
-lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
 
 CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
deleted file mode 100644
index 9be8a9144978..000000000000
--- a/lib/rwsem-spinlock.c
+++ /dev/null
@@ -1,296 +0,0 @@
-/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
- * generic spinlock implementation
- *
- * Copyright (c) 2001 David Howells (dhowells@redhat.com).
- * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
- * - Derived also from comments by Linus
- */
-#include <linux/rwsem.h>
-#include <linux/sched.h>
-#include <linux/export.h>
-
-enum rwsem_waiter_type {
-	RWSEM_WAITING_FOR_WRITE,
-	RWSEM_WAITING_FOR_READ
-};
-
-struct rwsem_waiter {
-	struct list_head list;
-	struct task_struct *task;
-	enum rwsem_waiter_type type;
-};
-
-int rwsem_is_locked(struct rw_semaphore *sem)
-{
-	int ret = 1;
-	unsigned long flags;
-
-	if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
-		ret = (sem->activity != 0);
-		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-	}
-	return ret;
-}
-EXPORT_SYMBOL(rwsem_is_locked);
-
-/*
- * initialise the semaphore
- */
-void __init_rwsem(struct rw_semaphore *sem, const char *name,
-		  struct lock_class_key *key)
-{
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	/*
-	 * Make sure we are not reinitializing a held semaphore:
-	 */
-	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
-	lockdep_init_map(&sem->dep_map, name, key, 0);
-#endif
-	sem->activity = 0;
-	raw_spin_lock_init(&sem->wait_lock);
-	INIT_LIST_HEAD(&sem->wait_list);
-}
-EXPORT_SYMBOL(__init_rwsem);
-
-/*
- * handle the lock release when processes blocked on it that can now run
- * - if we come here, then:
- *   - the 'active count' _reached_ zero
- *   - the 'waiting count' is non-zero
- * - the spinlock must be held by the caller
- * - woken process blocks are discarded from the list after having task zeroed
- * - writers are only woken if wakewrite is non-zero
- */
-static inline struct rw_semaphore *
-__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
-{
-	struct rwsem_waiter *waiter;
-	struct task_struct *tsk;
-	int woken;
-
-	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
-
-	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
-		if (wakewrite)
-			/* Wake up a writer. Note that we do not grant it the
-			 * lock - it will have to acquire it when it runs. */
-			wake_up_process(waiter->task);
-		goto out;
-	}
-
-	/* grant an infinite number of read locks to the front of the queue */
-	woken = 0;
-	do {
-		struct list_head *next = waiter->list.next;
-
-		list_del(&waiter->list);
-		tsk = waiter->task;
-		smp_mb();
-		waiter->task = NULL;
-		wake_up_process(tsk);
-		put_task_struct(tsk);
-		woken++;
-		if (next == &sem->wait_list)
-			break;
-		waiter = list_entry(next, struct rwsem_waiter, list);
-	} while (waiter->type != RWSEM_WAITING_FOR_WRITE);
-
-	sem->activity += woken;
-
- out:
-	return sem;
-}
-
-/*
- * wake a single writer
- */
-static inline struct rw_semaphore *
-__rwsem_wake_one_writer(struct rw_semaphore *sem)
-{
-	struct rwsem_waiter *waiter;
-
-	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
-	wake_up_process(waiter->task);
-
-	return sem;
-}
-
-/*
- * get a read lock on the semaphore
- */
-void __sched __down_read(struct rw_semaphore *sem)
-{
-	struct rwsem_waiter waiter;
-	struct task_struct *tsk;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&sem->wait_lock, flags);
-
-	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
-		/* granted */
-		sem->activity++;
-		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-		goto out;
-	}
-
-	tsk = current;
-	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-
-	/* set up my own style of waitqueue */
-	waiter.task = tsk;
-	waiter.type = RWSEM_WAITING_FOR_READ;
-	get_task_struct(tsk);
-
-	list_add_tail(&waiter.list, &sem->wait_list);
-
-	/* we don't need to touch the semaphore struct anymore */
-	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	/* wait to be given the lock */
-	for (;;) {
-		if (!waiter.task)
-			break;
-		schedule();
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-	}
-
-	tsk->state = TASK_RUNNING;
- out:
-	;
-}
-
-/*
- * trylock for reading -- returns 1 if successful, 0 if contention
- */
-int __down_read_trylock(struct rw_semaphore *sem)
-{
-	unsigned long flags;
-	int ret = 0;
-
-
-	raw_spin_lock_irqsave(&sem->wait_lock, flags);
-
-	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
-		/* granted */
-		sem->activity++;
-		ret = 1;
-	}
-
-	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	return ret;
-}
-
-/*
- * get a write lock on the semaphore
- */
-void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
-{
-	struct rwsem_waiter waiter;
-	struct task_struct *tsk;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&sem->wait_lock, flags);
-
-	/* set up my own style of waitqueue */
-	tsk = current;
-	waiter.task = tsk;
-	waiter.type = RWSEM_WAITING_FOR_WRITE;
-	list_add_tail(&waiter.list, &sem->wait_list);
-
-	/* wait for someone to release the lock */
-	for (;;) {
-		/*
-		 * That is the key to support write lock stealing: allows the
-		 * task already on CPU to get the lock soon rather than put
-		 * itself into sleep and waiting for system woke it or someone
-		 * else in the head of the wait list up.
-		 */
-		if (sem->activity == 0)
-			break;
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-		schedule();
-		raw_spin_lock_irqsave(&sem->wait_lock, flags);
-	}
-	/* got the lock */
-	sem->activity = -1;
-	list_del(&waiter.list);
-
-	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-}
-
-void __sched __down_write(struct rw_semaphore *sem)
-{
-	__down_write_nested(sem, 0);
-}
-
-/*
- * trylock for writing -- returns 1 if successful, 0 if contention
- */
-int __down_write_trylock(struct rw_semaphore *sem)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	raw_spin_lock_irqsave(&sem->wait_lock, flags);
-
-	if (sem->activity == 0) {
-		/* got the lock */
-		sem->activity = -1;
-		ret = 1;
-	}
-
-	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	return ret;
-}
-
-/*
- * release a read lock on the semaphore
- */
-void __up_read(struct rw_semaphore *sem)
-{
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&sem->wait_lock, flags);
-
-	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
-		sem = __rwsem_wake_one_writer(sem);
-
-	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-}
-
-/*
- * release a write lock on the semaphore
- */
-void __up_write(struct rw_semaphore *sem)
-{
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&sem->wait_lock, flags);
-
-	sem->activity = 0;
-	if (!list_empty(&sem->wait_list))
-		sem = __rwsem_do_wake(sem, 1);
-
-	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-}
-
-/*
- * downgrade a write lock into a read lock
- * - just wake up any readers at the front of the queue
- */
-void __downgrade_write(struct rw_semaphore *sem)
-{
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&sem->wait_lock, flags);
-
-	sem->activity = 1;
-	if (!list_empty(&sem->wait_list))
-		sem = __rwsem_do_wake(sem, 0);
-
-	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-}
-
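
The file above encodes the whole lock state in one integer, sem->activity: a positive value means that many readers hold the lock, -1 means a single writer holds it, and 0 means it is free; sem->wait_lock plus the FIFO wait_list decide who gets woken. As a rough user-space analogue of that counter (a sketch only: pthread primitives stand in for wait_lock/schedule(), it drops the FIFO fairness of the real wait_list, and every demo_* name is invented for illustration):

#include <pthread.h>

struct demo_rwsem {
	int activity;			/* >0 readers, -1 writer, 0 free */
	pthread_mutex_t wait_lock;
	pthread_cond_t wait;
};

void demo_down_read(struct demo_rwsem *sem)
{
	pthread_mutex_lock(&sem->wait_lock);
	while (sem->activity < 0)		/* a writer holds the lock */
		pthread_cond_wait(&sem->wait, &sem->wait_lock);
	sem->activity++;			/* join the active readers */
	pthread_mutex_unlock(&sem->wait_lock);
}

void demo_up_read(struct demo_rwsem *sem)
{
	pthread_mutex_lock(&sem->wait_lock);
	if (--sem->activity == 0)		/* last reader out: wake waiters */
		pthread_cond_broadcast(&sem->wait);
	pthread_mutex_unlock(&sem->wait_lock);
}

void demo_down_write(struct demo_rwsem *sem)
{
	pthread_mutex_lock(&sem->wait_lock);
	while (sem->activity != 0)		/* wait until completely free */
		pthread_cond_wait(&sem->wait, &sem->wait_lock);
	sem->activity = -1;			/* mark writer-owned */
	pthread_mutex_unlock(&sem->wait_lock);
}

void demo_up_write(struct demo_rwsem *sem)
{
	pthread_mutex_lock(&sem->wait_lock);
	sem->activity = 0;
	pthread_cond_broadcast(&sem->wait);
	pthread_mutex_unlock(&sem->wait_lock);
}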
diff --git a/lib/rwsem.c b/lib/rwsem.c
deleted file mode 100644
index 19c5fa95e0b4..000000000000
--- a/lib/rwsem.c
+++ /dev/null
@@ -1,293 +0,0 @@
-/* rwsem.c: R/W semaphores: contention handling functions
- *
- * Written by David Howells (dhowells@redhat.com).
- * Derived from arch/i386/kernel/semaphore.c
- *
- * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
- * and Michel Lespinasse <walken@google.com>
- */
-#include <linux/rwsem.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/export.h>
-
-/*
- * Initialize an rwsem:
- */
-void __init_rwsem(struct rw_semaphore *sem, const char *name,
-		  struct lock_class_key *key)
-{
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	/*
-	 * Make sure we are not reinitializing a held semaphore:
-	 */
-	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
-	lockdep_init_map(&sem->dep_map, name, key, 0);
-#endif
-	sem->count = RWSEM_UNLOCKED_VALUE;
-	raw_spin_lock_init(&sem->wait_lock);
-	INIT_LIST_HEAD(&sem->wait_list);
-}
-
-EXPORT_SYMBOL(__init_rwsem);
-
-enum rwsem_waiter_type {
-	RWSEM_WAITING_FOR_WRITE,
-	RWSEM_WAITING_FOR_READ
-};
-
-struct rwsem_waiter {
-	struct list_head list;
-	struct task_struct *task;
-	enum rwsem_waiter_type type;
-};
-
-enum rwsem_wake_type {
-	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
-	RWSEM_WAKE_READERS,	/* Wake readers only */
-	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
-};
-
-/*
- * handle the lock release when processes blocked on it that can now run
- * - if we come here from up_xxxx(), then:
- *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
- *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
- *   - there must be someone on the queue
- * - the spinlock must be held by the caller
- * - woken process blocks are discarded from the list after having task zeroed
- * - writers are only woken if downgrading is false
- */
-static struct rw_semaphore *
-__rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
-{
-	struct rwsem_waiter *waiter;
-	struct task_struct *tsk;
-	struct list_head *next;
-	long oldcount, woken, loop, adjustment;
-
-	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
-	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
-		if (wake_type == RWSEM_WAKE_ANY)
-			/* Wake writer at the front of the queue, but do not
-			 * grant it the lock yet as we want other writers
-			 * to be able to steal it. Readers, on the other hand,
-			 * will block as they will notice the queued writer.
-			 */
-			wake_up_process(waiter->task);
-		goto out;
-	}
-
-	/* Writers might steal the lock before we grant it to the next reader.
-	 * We prefer to do the first reader grant before counting readers
-	 * so we can bail out early if a writer stole the lock.
-	 */
-	adjustment = 0;
-	if (wake_type != RWSEM_WAKE_READ_OWNED) {
-		adjustment = RWSEM_ACTIVE_READ_BIAS;
- try_reader_grant:
-		oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
-		if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
-			/* A writer stole the lock. Undo our reader grant. */
-			if (rwsem_atomic_update(-adjustment, sem) &
-						RWSEM_ACTIVE_MASK)
-				goto out;
-			/* Last active locker left. Retry waking readers. */
-			goto try_reader_grant;
-		}
-	}
-
-	/* Grant an infinite number of read locks to the readers at the front
-	 * of the queue. Note we increment the 'active part' of the count by
-	 * the number of readers before waking any processes up.
-	 */
-	woken = 0;
-	do {
-		woken++;
-
-		if (waiter->list.next == &sem->wait_list)
-			break;
-
-		waiter = list_entry(waiter->list.next,
-					struct rwsem_waiter, list);
-
-	} while (waiter->type != RWSEM_WAITING_FOR_WRITE);
-
-	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
-	if (waiter->type != RWSEM_WAITING_FOR_WRITE)
-		/* hit end of list above */
-		adjustment -= RWSEM_WAITING_BIAS;
-
-	if (adjustment)
-		rwsem_atomic_add(adjustment, sem);
-
-	next = sem->wait_list.next;
-	loop = woken;
-	do {
-		waiter = list_entry(next, struct rwsem_waiter, list);
-		next = waiter->list.next;
-		tsk = waiter->task;
-		smp_mb();
-		waiter->task = NULL;
-		wake_up_process(tsk);
-		put_task_struct(tsk);
-	} while (--loop);
-
-	sem->wait_list.next = next;
-	next->prev = &sem->wait_list;
-
- out:
-	return sem;
-}
-
-/*
- * wait for the read lock to be granted
- */
-struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
-{
-	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
-	struct rwsem_waiter waiter;
-	struct task_struct *tsk = current;
-
-	/* set up my own style of waitqueue */
-	waiter.task = tsk;
-	waiter.type = RWSEM_WAITING_FOR_READ;
-	get_task_struct(tsk);
-
-	raw_spin_lock_irq(&sem->wait_lock);
-	if (list_empty(&sem->wait_list))
-		adjustment += RWSEM_WAITING_BIAS;
-	list_add_tail(&waiter.list, &sem->wait_list);
-
-	/* we're now waiting on the lock, but no longer actively locking */
-	count = rwsem_atomic_update(adjustment, sem);
-
-	/* If there are no active locks, wake the front queued process(es).
-	 *
-	 * If there are no writers and we are first in the queue,
-	 * wake our own waiter to join the existing active readers !
-	 */
-	if (count == RWSEM_WAITING_BIAS ||
-	    (count > RWSEM_WAITING_BIAS &&
-	     adjustment != -RWSEM_ACTIVE_READ_BIAS))
-		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
-
-	raw_spin_unlock_irq(&sem->wait_lock);
-
-	/* wait to be given the lock */
-	while (true) {
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-		if (!waiter.task)
-			break;
-		schedule();
-	}
-
-	tsk->state = TASK_RUNNING;
-
-	return sem;
-}
-
-/*
- * wait until we successfully acquire the write lock
- */
-struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
-{
-	long count, adjustment = -RWSEM_ACTIVE_WRITE_BIAS;
-	struct rwsem_waiter waiter;
-	struct task_struct *tsk = current;
-
-	/* set up my own style of waitqueue */
-	waiter.task = tsk;
-	waiter.type = RWSEM_WAITING_FOR_WRITE;
-
-	raw_spin_lock_irq(&sem->wait_lock);
-	if (list_empty(&sem->wait_list))
-		adjustment += RWSEM_WAITING_BIAS;
-	list_add_tail(&waiter.list, &sem->wait_list);
-
-	/* we're now waiting on the lock, but no longer actively locking */
-	count = rwsem_atomic_update(adjustment, sem);
-
-	/* If there were already threads queued before us and there are no
-	 * active writers, the lock must be read owned; so we try to wake
-	 * any read locks that were queued ahead of us. */
-	if (count > RWSEM_WAITING_BIAS &&
-	    adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
-		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READERS);
-
-	/* wait until we successfully acquire the lock */
-	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-	while (true) {
-		if (!(count & RWSEM_ACTIVE_MASK)) {
-			/* Try acquiring the write lock. */
-			count = RWSEM_ACTIVE_WRITE_BIAS;
-			if (!list_is_singular(&sem->wait_list))
-				count += RWSEM_WAITING_BIAS;
-
-			if (sem->count == RWSEM_WAITING_BIAS &&
-			    cmpxchg(&sem->count, RWSEM_WAITING_BIAS, count) ==
-							RWSEM_WAITING_BIAS)
-				break;
-		}
-
-		raw_spin_unlock_irq(&sem->wait_lock);
-
-		/* Block until there are no active lockers. */
-		do {
-			schedule();
-			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-		} while ((count = sem->count) & RWSEM_ACTIVE_MASK);
-
-		raw_spin_lock_irq(&sem->wait_lock);
-	}
-
-	list_del(&waiter.list);
-	raw_spin_unlock_irq(&sem->wait_lock);
-	tsk->state = TASK_RUNNING;
-
-	return sem;
-}
-
-/*
- * handle waking up a waiter on the semaphore
- * - up_read/up_write has decremented the active part of count if we come here
- */
-struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
-{
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&sem->wait_lock, flags);
-
-	/* do nothing if list empty */
-	if (!list_empty(&sem->wait_list))
-		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
-
-	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	return sem;
-}
-
-/*
- * downgrade a write lock into a read lock
- * - caller incremented waiting part of count and discovered it still negative
- * - just wake up any readers at the front of the queue
- */
-struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
-{
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&sem->wait_lock, flags);
-
-	/* do nothing if list empty */
-	if (!list_empty(&sem->wait_list))
-		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
-
-	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	return sem;
-}
-
-EXPORT_SYMBOL(rwsem_down_read_failed);
-EXPORT_SYMBOL(rwsem_down_write_failed);
-EXPORT_SYMBOL(rwsem_wake);
-EXPORT_SYMBOL(rwsem_downgrade_wake);
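
For reference, the xchg-add variant above keeps its state in sem->count: the 'active part' counts current holders, while the 'waiting part' goes negative whenever the wait_list is non-empty, so a single atomic add tells a reader or writer whether it got the lock or must queue through the slow paths shown here. Below is a small stand-alone C model of just the fast-path arithmetic; the bias values follow the 32-bit layout described in the __rwsem_do_wake() comment and are an assumption of this sketch, as are the try_* helper names (the real fast paths live in the per-architecture rwsem headers):

/* Illustrative model of the count layout (assumed 32-bit bias values). */
#include <stdatomic.h>
#include <stdio.h>

#define RWSEM_UNLOCKED_VALUE	0x00000000L
#define RWSEM_ACTIVE_BIAS	0x00000001L
#define RWSEM_ACTIVE_MASK	0x0000ffffL
#define RWSEM_WAITING_BIAS	(-0x00010000L)
#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

static atomic_long count = RWSEM_UNLOCKED_VALUE;

/* Reader fast path: add the read bias; a negative result means a writer
 * is active or waiting, and the real code would fall into the slow path. */
static int try_down_read(void)
{
	return atomic_fetch_add(&count, RWSEM_ACTIVE_READ_BIAS)
			+ RWSEM_ACTIVE_READ_BIAS >= 0;
}

/* Writer fast path: only succeeds if the count was completely unlocked. */
static int try_down_write(void)
{
	long expected = RWSEM_UNLOCKED_VALUE;

	return atomic_compare_exchange_strong(&count, &expected,
					      RWSEM_ACTIVE_WRITE_BIAS);
}

int main(void)
{
	printf("reader fast path: %d\n", try_down_read());   /* 1: granted   */
	printf("writer fast path: %d\n", try_down_write());  /* 0: contended */
	atomic_fetch_sub(&count, RWSEM_ACTIVE_READ_BIAS);    /* 'up_read'    */
	printf("writer retry:     %d\n", try_down_write());  /* 1: granted   */
	return 0;
}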