Diffstat (limited to 'lib')
-rw-r--r--   lib/Makefile          |    4
-rw-r--r--   lib/percpu-rwsem.c    |  165
-rw-r--r--   lib/rwsem-spinlock.c  |  296
-rw-r--r--   lib/rwsem.c           |  293
-rw-r--r--   lib/spinlock_debug.c  |  302
5 files changed, 0 insertions, 1060 deletions
diff --git a/lib/Makefile b/lib/Makefile
index bb016e116ba4..d480a8c92385 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -42,10 +42,6 @@ obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o
 obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
 obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
 obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
-obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
-lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
-lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
-lib-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
 
 CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
diff --git a/lib/percpu-rwsem.c b/lib/percpu-rwsem.c
deleted file mode 100644
index 652a8ee8efe9..000000000000
--- a/lib/percpu-rwsem.c
+++ /dev/null
@@ -1,165 +0,0 @@
#include <linux/atomic.h>
#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/wait.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/errno.h>

int __percpu_init_rwsem(struct percpu_rw_semaphore *brw,
                        const char *name, struct lock_class_key *rwsem_key)
{
        brw->fast_read_ctr = alloc_percpu(int);
        if (unlikely(!brw->fast_read_ctr))
                return -ENOMEM;

        /* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
        __init_rwsem(&brw->rw_sem, name, rwsem_key);
        atomic_set(&brw->write_ctr, 0);
        atomic_set(&brw->slow_read_ctr, 0);
        init_waitqueue_head(&brw->write_waitq);
        return 0;
}

void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
{
        free_percpu(brw->fast_read_ctr);
        brw->fast_read_ctr = NULL; /* catch use after free bugs */
}

/*
 * This is the fast-path for down_read/up_read, it only needs to ensure
 * there is no pending writer (atomic_read(write_ctr) == 0) and inc/dec the
 * fast per-cpu counter. The writer uses synchronize_sched_expedited() to
 * serialize with the preempt-disabled section below.
 *
 * The nontrivial part is that we should guarantee acquire/release semantics
 * in case when
 *
 *      R_W: down_write() comes after up_read(), the writer should see all
 *           changes done by the reader
 * or
 *      W_R: down_read() comes after up_write(), the reader should see all
 *           changes done by the writer
 *
 * If this helper fails the callers rely on the normal rw_semaphore and
 * atomic_dec_and_test(), so in this case we have the necessary barriers.
 *
 * But if it succeeds we do not have any barriers, atomic_read(write_ctr) or
 * __this_cpu_add() below can be reordered with any LOAD/STORE done by the
 * reader inside the critical section. See the comments in down_write and
 * up_write below.
 */
static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
{
        bool success = false;

        preempt_disable();
        if (likely(!atomic_read(&brw->write_ctr))) {
                __this_cpu_add(*brw->fast_read_ctr, val);
                success = true;
        }
        preempt_enable();

        return success;
}

/*
 * Like the normal down_read() this is not recursive, the writer can
 * come after the first percpu_down_read() and create the deadlock.
 *
 * Note: returns with lock_is_held(brw->rw_sem) == T for lockdep,
 * percpu_up_read() does rwsem_release(). This pairs with the usage
 * of ->rw_sem in percpu_down/up_write().
 */
void percpu_down_read(struct percpu_rw_semaphore *brw)
{
        might_sleep();
        if (likely(update_fast_ctr(brw, +1))) {
                rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);
                return;
        }

        down_read(&brw->rw_sem);
        atomic_inc(&brw->slow_read_ctr);
        /* avoid up_read()->rwsem_release() */
        __up_read(&brw->rw_sem);
}

void percpu_up_read(struct percpu_rw_semaphore *brw)
{
        rwsem_release(&brw->rw_sem.dep_map, 1, _RET_IP_);

        if (likely(update_fast_ctr(brw, -1)))
                return;

        /* false-positive is possible but harmless */
        if (atomic_dec_and_test(&brw->slow_read_ctr))
                wake_up_all(&brw->write_waitq);
}

static int clear_fast_ctr(struct percpu_rw_semaphore *brw)
{
        unsigned int sum = 0;
        int cpu;

        for_each_possible_cpu(cpu) {
                sum += per_cpu(*brw->fast_read_ctr, cpu);
                per_cpu(*brw->fast_read_ctr, cpu) = 0;
        }

        return sum;
}

/*
 * A writer increments ->write_ctr to force the readers to switch to the
 * slow mode, note the atomic_read() check in update_fast_ctr().
 *
 * After that the readers can only inc/dec the slow ->slow_read_ctr counter,
 * ->fast_read_ctr is stable. Once the writer moves its sum into the slow
 * counter it represents the number of active readers.
 *
 * Finally the writer takes ->rw_sem for writing and blocks the new readers,
 * then waits until the slow counter becomes zero.
 */
void percpu_down_write(struct percpu_rw_semaphore *brw)
{
        /* tell update_fast_ctr() there is a pending writer */
        atomic_inc(&brw->write_ctr);
        /*
         * 1. Ensures that write_ctr != 0 is visible to any down_read/up_read
         *    so that update_fast_ctr() can't succeed.
         *
         * 2. Ensures we see the result of every previous this_cpu_add() in
         *    update_fast_ctr().
         *
         * 3. Ensures that if any reader has exited its critical section via
         *    fast-path, it executes a full memory barrier before we return.
         *    See R_W case in the comment above update_fast_ctr().
         */
        synchronize_sched_expedited();

        /* exclude other writers, and block the new readers completely */
        down_write(&brw->rw_sem);

        /* nobody can use fast_read_ctr, move its sum into slow_read_ctr */
        atomic_add(clear_fast_ctr(brw), &brw->slow_read_ctr);

        /* wait for all readers to complete their percpu_up_read() */
        wait_event(brw->write_waitq, !atomic_read(&brw->slow_read_ctr));
}

void percpu_up_write(struct percpu_rw_semaphore *brw)
{
        /* release the lock, but the readers can't use the fast-path */
        up_write(&brw->rw_sem);
        /*
         * Insert the barrier before the next fast-path in down_read,
         * see W_R case in the comment above update_fast_ctr().
         */
        synchronize_sched_expedited();
        /* the last writer unblocks update_fast_ctr() */
        atomic_dec(&brw->write_ctr);
}
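
For orientation, a minimal sketch of how the API removed above is meant to be
used. The my_lock/my_state names and the surrounding functions are hypothetical;
only the percpu_down/up_read/write calls come from the file itself, plus the
percpu_init_rwsem() wrapper from <linux/percpu-rwsem.h>.

#include <linux/percpu-rwsem.h>

static struct percpu_rw_semaphore my_lock;      /* hypothetical lock */
static int my_state;                            /* hypothetical protected data */

static int my_setup(void)
{
        /* percpu_init_rwsem() supplies the lockdep key for __percpu_init_rwsem() */
        return percpu_init_rwsem(&my_lock);
}

static int my_read_path(void)
{
        int val;

        percpu_down_read(&my_lock);     /* fast path: a preempt-disabled per-cpu add */
        val = my_state;
        percpu_up_read(&my_lock);
        return val;
}

static void my_update(int val)
{
        percpu_down_write(&my_lock);    /* forces readers onto the rw_sem slow path */
        my_state = val;
        percpu_up_write(&my_lock);
}
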
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
deleted file mode 100644
index 9be8a9144978..000000000000
--- a/lib/rwsem-spinlock.c
+++ /dev/null
@@ -1,296 +0,0 @@
/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
 * generic spinlock implementation
 *
 * Copyright (c) 2001 David Howells (dhowells@redhat.com).
 * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
 * - Derived also from comments by Linus
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/export.h>

enum rwsem_waiter_type {
        RWSEM_WAITING_FOR_WRITE,
        RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
        struct list_head list;
        struct task_struct *task;
        enum rwsem_waiter_type type;
};

int rwsem_is_locked(struct rw_semaphore *sem)
{
        int ret = 1;
        unsigned long flags;

        if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
                ret = (sem->activity != 0);
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
        }
        return ret;
}
EXPORT_SYMBOL(rwsem_is_locked);

/*
 * initialise the semaphore
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
                  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held semaphore:
         */
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
        sem->activity = 0;
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
}
EXPORT_SYMBOL(__init_rwsem);

/*
 * handle the lock release when processes blocked on it that can now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;
        int woken;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

        if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
                if (wakewrite)
                        /* Wake up a writer. Note that we do not grant it the
                         * lock - it will have to acquire it when it runs. */
                        wake_up_process(waiter->task);
                goto out;
        }

        /* grant an infinite number of read locks to the front of the queue */
        woken = 0;
        do {
                struct list_head *next = waiter->list.next;

                list_del(&waiter->list);
                tsk = waiter->task;
                smp_mb();
                waiter->task = NULL;
                wake_up_process(tsk);
                put_task_struct(tsk);
                woken++;
                if (next == &sem->wait_list)
                        break;
                waiter = list_entry(next, struct rwsem_waiter, list);
        } while (waiter->type != RWSEM_WAITING_FOR_WRITE);

        sem->activity += woken;

 out:
        return sem;
}

/*
 * wake a single writer
 */
static inline struct rw_semaphore *
__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
        struct rwsem_waiter *waiter;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
        wake_up_process(waiter->task);

        return sem;
}

/*
 * get a read lock on the semaphore
 */
void __sched __down_read(struct rw_semaphore *sem)
{
        struct rwsem_waiter waiter;
        struct task_struct *tsk;
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity++;
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
                goto out;
        }

        tsk = current;
        set_task_state(tsk, TASK_UNINTERRUPTIBLE);

        /* set up my own style of waitqueue */
        waiter.task = tsk;
        waiter.type = RWSEM_WAITING_FOR_READ;
        get_task_struct(tsk);

        list_add_tail(&waiter.list, &sem->wait_list);

        /* we don't need to touch the semaphore struct anymore */
        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        /* wait to be given the lock */
        for (;;) {
                if (!waiter.task)
                        break;
                schedule();
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        }

        tsk->state = TASK_RUNNING;
 out:
        ;
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int __down_read_trylock(struct rw_semaphore *sem)
{
        unsigned long flags;
        int ret = 0;


        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity++;
                ret = 1;
        }

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return ret;
}

/*
 * get a write lock on the semaphore
 */
void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
{
        struct rwsem_waiter waiter;
        struct task_struct *tsk;
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        /* set up my own style of waitqueue */
        tsk = current;
        waiter.task = tsk;
        waiter.type = RWSEM_WAITING_FOR_WRITE;
        list_add_tail(&waiter.list, &sem->wait_list);

        /* wait for someone to release the lock */
        for (;;) {
                /*
                 * That is the key to support write lock stealing: allows the
                 * task already on CPU to get the lock soon rather than put
                 * itself into sleep and waiting for system woke it or someone
                 * else in the head of the wait list up.
                 */
                if (sem->activity == 0)
                        break;
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
                schedule();
                raw_spin_lock_irqsave(&sem->wait_lock, flags);
        }
        /* got the lock */
        sem->activity = -1;
        list_del(&waiter.list);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

void __sched __down_write(struct rw_semaphore *sem)
{
        __down_write_nested(sem, 0);
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int __down_write_trylock(struct rw_semaphore *sem)
{
        unsigned long flags;
        int ret = 0;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (sem->activity == 0) {
                /* got the lock */
                sem->activity = -1;
                ret = 1;
        }

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return ret;
}

/*
 * release a read lock on the semaphore
 */
void __up_read(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (--sem->activity == 0 && !list_empty(&sem->wait_list))
                sem = __rwsem_wake_one_writer(sem);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * release a write lock on the semaphore
 */
void __up_write(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        sem->activity = 0;
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 1);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * downgrade a write lock into a read lock
 * - just wake up any readers at the front of the queue
 */
void __downgrade_write(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        sem->activity = 1;
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 0);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
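
The implementation above keeps the whole lock state in sem->activity: 0 means
unlocked, a positive value is the number of active readers, and -1 marks a
single writer. A standalone model of those transitions, written as plain
userspace C purely for illustration, mirroring the trylock paths and ignoring
the wait list:

#include <assert.h>

struct model_rwsem {
        int activity;           /* 0: free, >0: reader count, -1: writer */
};

static int model_try_read(struct model_rwsem *sem)
{
        if (sem->activity >= 0) {       /* no writer holds it */
                sem->activity++;
                return 1;
        }
        return 0;
}

static int model_try_write(struct model_rwsem *sem)
{
        if (sem->activity == 0) {       /* nobody holds it */
                sem->activity = -1;
                return 1;
        }
        return 0;
}

int main(void)
{
        struct model_rwsem sem = { .activity = 0 };

        assert(model_try_read(&sem));   /* activity: 0 -> 1 */
        assert(model_try_read(&sem));   /* activity: 1 -> 2 */
        assert(!model_try_write(&sem)); /* writer excluded by readers */
        sem.activity -= 2;              /* both readers release */
        assert(model_try_write(&sem));  /* activity: 0 -> -1 */
        return 0;
}
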
diff --git a/lib/rwsem.c b/lib/rwsem.c
deleted file mode 100644
index 19c5fa95e0b4..000000000000
--- a/lib/rwsem.c
+++ /dev/null
@@ -1,293 +0,0 @@
/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
                  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held semaphore:
         */
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
        sem->count = RWSEM_UNLOCKED_VALUE;
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
}

EXPORT_SYMBOL(__init_rwsem);

enum rwsem_waiter_type {
        RWSEM_WAITING_FOR_WRITE,
        RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
        struct list_head list;
        struct task_struct *task;
        enum rwsem_waiter_type type;
};

enum rwsem_wake_type {
        RWSEM_WAKE_ANY,         /* Wake whatever's at head of wait list */
        RWSEM_WAKE_READERS,     /* Wake readers only */
        RWSEM_WAKE_READ_OWNED   /* Waker thread holds the read lock */
};

/*
 * handle the lock release when processes blocked on it that can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if downgrading is false
 */
static struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
{
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;
        struct list_head *next;
        long oldcount, woken, loop, adjustment;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
        if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
                if (wake_type == RWSEM_WAKE_ANY)
                        /* Wake writer at the front of the queue, but do not
                         * grant it the lock yet as we want other writers
                         * to be able to steal it. Readers, on the other hand,
                         * will block as they will notice the queued writer.
                         */
                        wake_up_process(waiter->task);
                goto out;
        }

        /* Writers might steal the lock before we grant it to the next reader.
         * We prefer to do the first reader grant before counting readers
         * so we can bail out early if a writer stole the lock.
         */
        adjustment = 0;
        if (wake_type != RWSEM_WAKE_READ_OWNED) {
                adjustment = RWSEM_ACTIVE_READ_BIAS;
 try_reader_grant:
                oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
                if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
                        /* A writer stole the lock. Undo our reader grant. */
                        if (rwsem_atomic_update(-adjustment, sem) &
                                                RWSEM_ACTIVE_MASK)
                                goto out;
                        /* Last active locker left. Retry waking readers. */
                        goto try_reader_grant;
                }
        }

        /* Grant an infinite number of read locks to the readers at the front
         * of the queue. Note we increment the 'active part' of the count by
         * the number of readers before waking any processes up.
         */
        woken = 0;
        do {
                woken++;

                if (waiter->list.next == &sem->wait_list)
                        break;

                waiter = list_entry(waiter->list.next,
                                    struct rwsem_waiter, list);

        } while (waiter->type != RWSEM_WAITING_FOR_WRITE);

        adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
        if (waiter->type != RWSEM_WAITING_FOR_WRITE)
                /* hit end of list above */
                adjustment -= RWSEM_WAITING_BIAS;

        if (adjustment)
                rwsem_atomic_add(adjustment, sem);

        next = sem->wait_list.next;
        loop = woken;
        do {
                waiter = list_entry(next, struct rwsem_waiter, list);
                next = waiter->list.next;
                tsk = waiter->task;
                smp_mb();
                waiter->task = NULL;
                wake_up_process(tsk);
                put_task_struct(tsk);
        } while (--loop);

        sem->wait_list.next = next;
        next->prev = &sem->wait_list;

 out:
        return sem;
}

/*
 * wait for the read lock to be granted
 */
struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
{
        long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
        struct rwsem_waiter waiter;
        struct task_struct *tsk = current;

        /* set up my own style of waitqueue */
        waiter.task = tsk;
        waiter.type = RWSEM_WAITING_FOR_READ;
        get_task_struct(tsk);

        raw_spin_lock_irq(&sem->wait_lock);
        if (list_empty(&sem->wait_list))
                adjustment += RWSEM_WAITING_BIAS;
        list_add_tail(&waiter.list, &sem->wait_list);

        /* we're now waiting on the lock, but no longer actively locking */
        count = rwsem_atomic_update(adjustment, sem);

        /* If there are no active locks, wake the front queued process(es).
         *
         * If there are no writers and we are first in the queue,
         * wake our own waiter to join the existing active readers !
         */
        if (count == RWSEM_WAITING_BIAS ||
            (count > RWSEM_WAITING_BIAS &&
             adjustment != -RWSEM_ACTIVE_READ_BIAS))
                sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

        raw_spin_unlock_irq(&sem->wait_lock);

        /* wait to be given the lock */
        while (true) {
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                if (!waiter.task)
                        break;
                schedule();
        }

        tsk->state = TASK_RUNNING;

        return sem;
}

/*
 * wait until we successfully acquire the write lock
 */
struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
{
        long count, adjustment = -RWSEM_ACTIVE_WRITE_BIAS;
        struct rwsem_waiter waiter;
        struct task_struct *tsk = current;

        /* set up my own style of waitqueue */
        waiter.task = tsk;
        waiter.type = RWSEM_WAITING_FOR_WRITE;

        raw_spin_lock_irq(&sem->wait_lock);
        if (list_empty(&sem->wait_list))
                adjustment += RWSEM_WAITING_BIAS;
        list_add_tail(&waiter.list, &sem->wait_list);

        /* we're now waiting on the lock, but no longer actively locking */
        count = rwsem_atomic_update(adjustment, sem);

        /* If there were already threads queued before us and there are no
         * active writers, the lock must be read owned; so we try to wake
         * any read locks that were queued ahead of us. */
        if (count > RWSEM_WAITING_BIAS &&
            adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
                sem = __rwsem_do_wake(sem, RWSEM_WAKE_READERS);

        /* wait until we successfully acquire the lock */
        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        while (true) {
                if (!(count & RWSEM_ACTIVE_MASK)) {
                        /* Try acquiring the write lock. */
                        count = RWSEM_ACTIVE_WRITE_BIAS;
                        if (!list_is_singular(&sem->wait_list))
                                count += RWSEM_WAITING_BIAS;

                        if (sem->count == RWSEM_WAITING_BIAS &&
                            cmpxchg(&sem->count, RWSEM_WAITING_BIAS, count) ==
                                                        RWSEM_WAITING_BIAS)
                                break;
                }

                raw_spin_unlock_irq(&sem->wait_lock);

                /* Block until there are no active lockers. */
                do {
                        schedule();
                        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                } while ((count = sem->count) & RWSEM_ACTIVE_MASK);

                raw_spin_lock_irq(&sem->wait_lock);
        }

        list_del(&waiter.list);
        raw_spin_unlock_irq(&sem->wait_lock);
        tsk->state = TASK_RUNNING;

        return sem;
}

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        /* do nothing if list empty */
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return sem;
}

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        /* do nothing if list empty */
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return sem;
}

EXPORT_SYMBOL(rwsem_down_read_failed);
EXPORT_SYMBOL(rwsem_down_write_failed);
EXPORT_SYMBOL(rwsem_wake);
EXPORT_SYMBOL(rwsem_downgrade_wake);
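
The xadd-based variant above packs everything into sem->count: an 'active' part
in the low bits and a 'waiting' bias in the high bits, as the comment on
__rwsem_do_wake() describes. The bias constants live in the per-arch rwsem
headers, not in this file, so the 32-bit values below are an assumption used
only to make the arithmetic concrete:

#include <stdio.h>

#define RWSEM_UNLOCKED_VALUE    0x00000000L
#define RWSEM_ACTIVE_BIAS       0x00000001L
#define RWSEM_ACTIVE_MASK       0x0000ffffL
#define RWSEM_WAITING_BIAS      (-0x00010000L)
#define RWSEM_ACTIVE_READ_BIAS  RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
        long count = RWSEM_UNLOCKED_VALUE;

        count += RWSEM_ACTIVE_READ_BIAS;        /* down_read() fast path */
        printf("one reader:             0x%08lx\n", count & 0xffffffffUL);

        count += RWSEM_WAITING_BIAS;            /* a writer queues itself */
        printf("reader + queued writer: 0x%08lx (active part 0x%04lx)\n",
               count & 0xffffffffUL,
               (unsigned long)(count & RWSEM_ACTIVE_MASK));

        count -= RWSEM_ACTIVE_READ_BIAS;        /* up_read() */
        printf("only the waiter left:   0x%08lx (the RWSEM_WAITING_BIAS state\n"
               "                        that rwsem_down_write_failed() cmpxchg()es)\n",
               count & 0xffffffffUL);
        return 0;
}
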
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
deleted file mode 100644
index 0374a596cffa..000000000000
--- a/lib/spinlock_debug.c
+++ /dev/null
@@ -1,302 +0,0 @@
/*
 * Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * This file contains the spinlock/rwlock implementations for
 * DEBUG_SPINLOCK.
 */

#include <linux/spinlock.h>
#include <linux/nmi.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/delay.h>
#include <linux/export.h>

void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                          struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held lock:
         */
        debug_check_no_locks_freed((void *)lock, sizeof(*lock));
        lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
        lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
        lock->magic = SPINLOCK_MAGIC;
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__raw_spin_lock_init);

void __rwlock_init(rwlock_t *lock, const char *name,
                   struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held lock:
         */
        debug_check_no_locks_freed((void *)lock, sizeof(*lock));
        lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
        lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
        lock->magic = RWLOCK_MAGIC;
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;
}

EXPORT_SYMBOL(__rwlock_init);

static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
        struct task_struct *owner = NULL;

        if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
                owner = lock->owner;
        printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
                msg, raw_smp_processor_id(),
                current->comm, task_pid_nr(current));
        printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
                        ".owner_cpu: %d\n",
                lock, lock->magic,
                owner ? owner->comm : "<none>",
                owner ? task_pid_nr(owner) : -1,
                lock->owner_cpu);
        dump_stack();
}

static void spin_bug(raw_spinlock_t *lock, const char *msg)
{
        if (!debug_locks_off())
                return;

        spin_dump(lock, msg);
}

#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)

static inline void
debug_spin_lock_before(raw_spinlock_t *lock)
{
        SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
        SPIN_BUG_ON(lock->owner == current, lock, "recursion");
        SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
                                                lock, "cpu recursion");
}

static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
        lock->owner_cpu = raw_smp_processor_id();
        lock->owner = current;
}

static inline void debug_spin_unlock(raw_spinlock_t *lock)
{
        SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
        SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
        SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
        SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
                                                lock, "wrong CPU");
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;
}

static void __spin_lock_debug(raw_spinlock_t *lock)
{
        u64 i;
        u64 loops = loops_per_jiffy * HZ;

        for (i = 0; i < loops; i++) {
                if (arch_spin_trylock(&lock->raw_lock))
                        return;
                __delay(1);
        }
        /* lockup suspected: */
        spin_dump(lock, "lockup suspected");
#ifdef CONFIG_SMP
        trigger_all_cpu_backtrace();
#endif

        /*
         * The trylock above was causing a livelock.  Give the lower level arch
         * specific lock code a chance to acquire the lock. We have already
         * printed a warning/backtrace at this point. The non-debug arch
         * specific code might actually succeed in acquiring the lock.  If it is
         * not successful, the end-result is the same - there is no forward
         * progress.
         */
        arch_spin_lock(&lock->raw_lock);
}

void do_raw_spin_lock(raw_spinlock_t *lock)
{
        debug_spin_lock_before(lock);
        if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
                __spin_lock_debug(lock);
        debug_spin_lock_after(lock);
}

int do_raw_spin_trylock(raw_spinlock_t *lock)
{
        int ret = arch_spin_trylock(&lock->raw_lock);

        if (ret)
                debug_spin_lock_after(lock);
#ifndef CONFIG_SMP
        /*
         * Must not happen on UP:
         */
        SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
        return ret;
}

void do_raw_spin_unlock(raw_spinlock_t *lock)
{
        debug_spin_unlock(lock);
        arch_spin_unlock(&lock->raw_lock);
}

static void rwlock_bug(rwlock_t *lock, const char *msg)
{
        if (!debug_locks_off())
                return;

        printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
                msg, raw_smp_processor_id(), current->comm,
                task_pid_nr(current), lock);
        dump_stack();
}

#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)

#if 0           /* __write_lock_debug() can lock up - maybe this can too? */
static void __read_lock_debug(rwlock_t *lock)
{
        u64 i;
        u64 loops = loops_per_jiffy * HZ;
        int print_once = 1;

        for (;;) {
                for (i = 0; i < loops; i++) {
                        if (arch_read_trylock(&lock->raw_lock))
                                return;
                        __delay(1);
                }
                /* lockup suspected: */
                if (print_once) {
                        print_once = 0;
                        printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
                                        "%s/%d, %p\n",
                                raw_smp_processor_id(), current->comm,
                                current->pid, lock);
                        dump_stack();
                }
        }
}
#endif

void do_raw_read_lock(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        arch_read_lock(&lock->raw_lock);
}

int do_raw_read_trylock(rwlock_t *lock)
{
        int ret = arch_read_trylock(&lock->raw_lock);

#ifndef CONFIG_SMP
        /*
         * Must not happen on UP:
         */
        RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
        return ret;
}

void do_raw_read_unlock(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        arch_read_unlock(&lock->raw_lock);
}

static inline void debug_write_lock_before(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
        RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
                                                lock, "cpu recursion");
}

static inline void debug_write_lock_after(rwlock_t *lock)
{
        lock->owner_cpu = raw_smp_processor_id();
        lock->owner = current;
}

static inline void debug_write_unlock(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
        RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
                                                lock, "wrong CPU");
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;
}

#if 0           /* This can cause lockups */
static void __write_lock_debug(rwlock_t *lock)
{
        u64 i;
        u64 loops = loops_per_jiffy * HZ;
        int print_once = 1;

        for (;;) {
                for (i = 0; i < loops; i++) {
                        if (arch_write_trylock(&lock->raw_lock))
                                return;
                        __delay(1);
                }
                /* lockup suspected: */
                if (print_once) {
                        print_once = 0;
                        printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
                                        "%s/%d, %p\n",
                                raw_smp_processor_id(), current->comm,
                                current->pid, lock);
                        dump_stack();
                }
        }
}
#endif

void do_raw_write_lock(rwlock_t *lock)
{
        debug_write_lock_before(lock);
        arch_write_lock(&lock->raw_lock);
        debug_write_lock_after(lock);
}

int do_raw_write_trylock(rwlock_t *lock)
{
        int ret = arch_write_trylock(&lock->raw_lock);

        if (ret)
                debug_write_lock_after(lock);
#ifndef CONFIG_SMP
        /*
         * Must not happen on UP:
         */
        RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
        return ret;
}

void do_raw_write_unlock(rwlock_t *lock)
{
        debug_write_unlock(lock);
        arch_write_unlock(&lock->raw_lock);
}
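
The wrappers above exist to turn silent lock misuse into a loud report rather
than quiet state corruption. A rough illustration of the kind of bug the checks
in debug_spin_unlock() catch; the lock and function names are made up, and the
exact splat text comes from spin_dump() above:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);        /* hypothetical lock */

static void buggy_double_unlock(void)
{
        spin_lock(&my_lock);
        spin_unlock(&my_lock);
        /*
         * With CONFIG_DEBUG_SPINLOCK the second unlock reaches
         * do_raw_spin_unlock() -> debug_spin_unlock() and trips the
         * SPIN_BUG_ON(!raw_spin_is_locked(lock), ...) check, reporting
         * "already unlocked" instead of silently corrupting the lock state.
         */
        spin_unlock(&my_lock);
}
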