author	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-18 11:25:29 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-18 11:25:29 -0400
commit	d7bb545d86825e635cab33a1dd81ca0ad7b92887 (patch)
tree	34da4139ef06ceab6549aea3906639c7413978c8 /lib
parent	75e98b34155264d943aa53edce465e87f3ccbadf (diff)
parent	2342e51ba2b52a7f5b78227e6faa4603ed3632a0 (diff)
Merge branch 'semaphore' of git://git.kernel.org/pub/scm/linux/kernel/git/willy/misc
* 'semaphore' of git://git.kernel.org/pub/scm/linux/kernel/git/willy/misc:
  Remove DEBUG_SEMAPHORE from Kconfig
  Improve semaphore documentation
  Simplify semaphore implementation
  Add down_timeout and change ACPI to use it
  Introduce down_killable()
  Generic semaphore implementation
  Add semaphore.h to kernel_lock.c
  Fix quota.h includes
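The series replaces the per-architecture sleeper code with one generic
implementation and adds two new entry points, down_killable() and
down_timeout(). A minimal sketch of how a caller might use them
(sem_demo and the 100 ms timeout are illustrative, not taken from this
merge):

#include <linux/semaphore.h>
#include <linux/jiffies.h>

static struct semaphore sem_demo;

static int demo(void)
{
	int err;

	sema_init(&sem_demo, 1);	/* count of 1: mutual exclusion */

	/* down_killable(): sleep until acquired, but let a fatal
	 * signal interrupt the wait; returns -EINTR in that case. */
	err = down_killable(&sem_demo);
	if (err)
		return err;
	up(&sem_demo);

	/* down_timeout(): bound the wait; returns -ETIME if the
	 * semaphore stayed busy for the whole interval. */
	err = down_timeout(&sem_demo, msecs_to_jiffies(100));
	if (err)
		return err;
	up(&sem_demo);

	return 0;
}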
Diffstat (limited to 'lib')
-rw-r--r--	lib/Kconfig.debug	10
-rw-r--r--	lib/Makefile	1
-rw-r--r--	lib/kernel_lock.c	1
-rw-r--r--	lib/semaphore-sleepers.c	176
4 files changed, 1 insertion(+), 187 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index eef557dc46c3..80db357b0a42 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -265,16 +265,6 @@ config DEBUG_MUTEXES
 	  This feature allows mutex semantics violations to be detected and
 	  reported.
 
-config DEBUG_SEMAPHORE
-	bool "Semaphore debugging"
-	depends on DEBUG_KERNEL
-	depends on ALPHA || FRV
-	default n
-	help
-	  If you say Y here then semaphore processing will issue lots of
-	  verbose debugging messages. If you suspect a semaphore problem or a
-	  kernel hacker asks for this option then say Y. Otherwise say N.
-
 config DEBUG_LOCK_ALLOC
 	bool "Lock debugging: detect incorrect freeing of live locks"
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
diff --git a/lib/Makefile b/lib/Makefile
index 23de261a4c83..28dba90d5020 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -29,7 +29,6 @@ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
 lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
-lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o
 lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 812dbf00844b..fbc11a336bc5 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -8,6 +8,7 @@
 #include <linux/smp_lock.h>
 #include <linux/module.h>
 #include <linux/kallsyms.h>
+#include <asm/semaphore.h>
 
 /*
  * The 'big kernel semaphore'
diff --git a/lib/semaphore-sleepers.c b/lib/semaphore-sleepers.c
deleted file mode 100644
index 0198782cdacb..000000000000
--- a/lib/semaphore-sleepers.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * i386 and x86-64 semaphore implementation.
- *
- * (C) Copyright 1999 Linus Torvalds
- *
- * Portions Copyright 1999 Red Hat, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
- */
-#include <linux/sched.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <asm/semaphore.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation).
- *
- * "sleeping" and the contention routine ordering is protected
- * by the spinlock in the semaphore's waitqueue head.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- *  - only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - when we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleeper" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-void __up(struct semaphore *sem)
-{
-	wake_up(&sem->wait);
-}
-
-void __sched __down(struct semaphore *sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	unsigned long flags;
-
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock in
-		 * the wait_queue_head.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-		schedule();
-
-		spin_lock_irqsave(&sem->wait.lock, flags);
-		tsk->state = TASK_UNINTERRUPTIBLE;
-	}
-	remove_wait_queue_locked(&sem->wait, &wait);
-	wake_up_locked(&sem->wait);
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-	tsk->state = TASK_RUNNING;
-}
-
-int __sched __down_interruptible(struct semaphore *sem)
-{
-	int retval = 0;
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	unsigned long flags;
-
-	tsk->state = TASK_INTERRUPTIBLE;
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * With signals pending, this turns into
-		 * the trylock failure case - we won't be
-		 * sleeping, and we can't get the lock as
-		 * it has contention. Just correct the count
-		 * and exit.
-		 */
-		if (signal_pending(current)) {
-			retval = -EINTR;
-			sem->sleepers = 0;
-			atomic_add(sleepers, &sem->count);
-			break;
-		}
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock in
-		 * wait_queue_head. The "-1" is because we're
-		 * still hoping to get the semaphore.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-		schedule();
-
-		spin_lock_irqsave(&sem->wait.lock, flags);
-		tsk->state = TASK_INTERRUPTIBLE;
-	}
-	remove_wait_queue_locked(&sem->wait, &wait);
-	wake_up_locked(&sem->wait);
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-	tsk->state = TASK_RUNNING;
-	return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for
- * having decremented the count.
- *
- * We could have done the trylock with a
- * single "cmpxchg" without failure cases,
- * but then it wouldn't work on a 386.
- */
-int __down_trylock(struct semaphore *sem)
-{
-	int sleepers;
-	unsigned long flags;
-
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	sleepers = sem->sleepers + 1;
-	sem->sleepers = 0;
-
-	/*
-	 * Add "everybody else" and us into it. They aren't
-	 * playing, because we own the spinlock in the
-	 * wait_queue_head.
-	 */
-	if (!atomic_add_negative(sleepers, &sem->count)) {
-		wake_up_locked(&sem->wait);
-	}
-
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-	return 1;
-}
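The sleeper code deleted above hinges on atomic_add_negative(): add a
delta to the count and report whether the result went negative. A
standalone userspace sketch of the same accounting, assuming C11
atomics (the demo_* names are illustrative, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int demo_count = 1;	/* free semaphore, count 1 */

static bool demo_add_negative(int delta, atomic_int *v)
{
	/* atomic_fetch_add() returns the old value, so the new
	 * value is old + delta */
	return atomic_fetch_add(v, delta) + delta < 0;
}

int main(void)
{
	atomic_fetch_sub(&demo_count, 1);	/* down(): 1 -> 0, acquired */
	atomic_fetch_sub(&demo_count, 1);	/* down(): 0 -> -1, contended */

	/* The lone contender (sleepers == 1) folds "everybody else"
	 * back in: adding sleepers - 1 == 0 leaves the count at -1,
	 * still negative, so the kernel loop would sleep again. */
	if (demo_add_negative(1 - 1, &demo_count))
		printf("still contended, would sleep\n");

	/* up() from the holder: -1 -> 0; the old value was negative,
	 * so there is a waiter to wake. */
	if (atomic_fetch_add(&demo_count, 1) < 0)
		printf("wake one waiter\n");
	return 0;
}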