author    Matthew Wilcox <matthew@wil.cx>          2008-03-07 21:55:58 -0500
committer Matthew Wilcox <willy@linux.intel.com>   2008-04-17 10:42:34 -0400
commit    64ac24e738823161693bf791f87adc802cf529ff (patch)
tree      19c0b0cf314d4394ca580c05b86cdf874ce0a167
parent    e48b3deee475134585eed03e7afebe4bf9e0dba9 (diff)

Generic semaphore implementation

Semaphores are no longer performance-critical, so a generic C
implementation is better for maintainability, debuggability and
extensibility.

Thanks to Peter Zijlstra for fixing the lockdep warning. Thanks to
Harvey Harrison for pointing out that the unlikely() was unnecessary.

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
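For orientation, the replacement lives in include/linux/semaphore.h and
kernel/semaphore.c (both added by this commit, per the diffstat below).
A condensed sketch of the approach, abridged from memory rather than
quoted from the patch: one spinlock, a plain count and a wait list
replace every per-arch fast path removed below.

	/* Sketch only; see kernel/semaphore.c in the tree for the real code. */
	struct semaphore {
		spinlock_t		lock;
		unsigned int		count;
		struct list_head	wait_list;
	};

	void down(struct semaphore *sem)
	{
		unsigned long flags;

		spin_lock_irqsave(&sem->lock, flags);
		if (sem->count > 0)
			sem->count--;	/* uncontended fast path */
		else
			__down(sem);	/* queue on wait_list and sleep (omitted) */
		spin_unlock_irqrestore(&sem->lock, flags);
	}

	void up(struct semaphore *sem)
	{
		unsigned long flags;

		spin_lock_irqsave(&sem->lock, flags);
		if (list_empty(&sem->wait_list))
			sem->count++;
		else
			__up(sem);	/* hand off directly to the first waiter */
		spin_unlock_irqrestore(&sem->lock, flags);
	}

The direct-handoff design is essentially the FR-V scheme below,
generalized: no per-arch assembly fast path is kept, since the commit
message notes semaphores are no longer performance-critical.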
-rw-r--r--  arch/alpha/kernel/Makefile | 2
-rw-r--r--  arch/alpha/kernel/alpha_ksyms.c | 9
-rw-r--r--  arch/alpha/kernel/semaphore.c | 224
-rw-r--r--  arch/arm/kernel/Makefile | 2
-rw-r--r--  arch/arm/kernel/semaphore.c | 221
-rw-r--r--  arch/avr32/kernel/Makefile | 2
-rw-r--r--  arch/avr32/kernel/semaphore.c | 148
-rw-r--r--  arch/blackfin/Kconfig | 4
-rw-r--r--  arch/blackfin/kernel/bfin_ksyms.c | 5
-rw-r--r--  arch/cris/kernel/Makefile | 3
-rw-r--r--  arch/cris/kernel/crisksyms.c | 7
-rw-r--r--  arch/cris/kernel/semaphore.c | 129
-rw-r--r--  arch/frv/kernel/Makefile | 2
-rw-r--r--  arch/frv/kernel/frv_ksyms.c | 1
-rw-r--r--  arch/frv/kernel/semaphore.c | 155
-rw-r--r--  arch/h8300/kernel/Makefile | 2
-rw-r--r--  arch/h8300/kernel/h8300_ksyms.c | 1
-rw-r--r--  arch/h8300/kernel/semaphore.c | 132
-rw-r--r--  arch/ia64/kernel/Makefile | 2
-rw-r--r--  arch/ia64/kernel/ia64_ksyms.c | 6
-rw-r--r--  arch/ia64/kernel/semaphore.c | 165
-rw-r--r--  arch/m32r/kernel/Makefile | 2
-rw-r--r--  arch/m32r/kernel/m32r_ksyms.c | 5
-rw-r--r--  arch/m32r/kernel/semaphore.c | 185
-rw-r--r--  arch/m68k/kernel/Makefile | 2
-rw-r--r--  arch/m68k/kernel/m68k_ksyms.c | 6
-rw-r--r--  arch/m68k/kernel/semaphore.c | 132
-rw-r--r--  arch/m68k/lib/Makefile | 2
-rw-r--r--  arch/m68k/lib/semaphore.S | 53
-rw-r--r--  arch/m68knommu/kernel/Makefile | 2
-rw-r--r--  arch/m68knommu/kernel/m68k_ksyms.c | 6
-rw-r--r--  arch/m68knommu/kernel/semaphore.c | 133
-rw-r--r--  arch/m68knommu/lib/Makefile | 2
-rw-r--r--  arch/m68knommu/lib/semaphore.S | 66
-rw-r--r--  arch/mips/kernel/Makefile | 2
-rw-r--r--  arch/mips/kernel/semaphore.c | 168
-rw-r--r--  arch/mn10300/kernel/Makefile | 2
-rw-r--r--  arch/mn10300/kernel/semaphore.c | 149
-rw-r--r--  arch/parisc/kernel/Makefile | 2
-rw-r--r--  arch/parisc/kernel/parisc_ksyms.c | 5
-rw-r--r--  arch/parisc/kernel/semaphore.c | 102
-rw-r--r--  arch/powerpc/kernel/Makefile | 2
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c | 1
-rw-r--r--  arch/powerpc/kernel/semaphore.c | 135
-rw-r--r--  arch/ppc/kernel/semaphore.c | 131
-rw-r--r--  arch/s390/kernel/Makefile | 2
-rw-r--r--  arch/s390/kernel/s390_ksyms.c | 7
-rw-r--r--  arch/s390/kernel/semaphore.c | 108
-rw-r--r--  arch/sh/kernel/Makefile_32 | 2
-rw-r--r--  arch/sh/kernel/Makefile_64 | 2
-rw-r--r--  arch/sh/kernel/semaphore.c | 139
-rw-r--r--  arch/sh/kernel/sh_ksyms_32.c | 7
-rw-r--r--  arch/sh/kernel/sh_ksyms_64.c | 4
-rw-r--r--  arch/sparc/kernel/Makefile | 2
-rw-r--r--  arch/sparc/kernel/semaphore.c | 155
-rw-r--r--  arch/sparc/kernel/sparc_ksyms.c | 5
-rw-r--r--  arch/sparc64/kernel/Makefile | 2
-rw-r--r--  arch/sparc64/kernel/semaphore.c | 254
-rw-r--r--  arch/sparc64/kernel/sparc64_ksyms.c | 6
-rw-r--r--  arch/um/Kconfig.i386 | 4
-rw-r--r--  arch/um/Kconfig.x86_64 | 4
-rw-r--r--  arch/um/sys-i386/ksyms.c | 12
-rw-r--r--  arch/um/sys-ppc/Makefile | 8
-rw-r--r--  arch/um/sys-x86_64/ksyms.c | 13
-rw-r--r--  arch/v850/kernel/Makefile | 2
-rw-r--r--  arch/v850/kernel/semaphore.c | 166
-rw-r--r--  arch/v850/kernel/v850_ksyms.c | 7
-rw-r--r--  arch/x86/Kconfig | 3
-rw-r--r--  arch/x86/kernel/i386_ksyms_32.c | 5
-rw-r--r--  arch/x86/kernel/x8664_ksyms_64.c | 6
-rw-r--r--  arch/x86/lib/semaphore_32.S | 83
-rw-r--r--  arch/x86/lib/thunk_64.S | 5
-rw-r--r--  arch/xtensa/kernel/Makefile | 2
-rw-r--r--  arch/xtensa/kernel/semaphore.c | 226
-rw-r--r--  arch/xtensa/kernel/xtensa_ksyms.c | 9
-rw-r--r--  include/asm-alpha/semaphore.h | 150
-rw-r--r--  include/asm-arm/semaphore-helper.h | 84
-rw-r--r--  include/asm-arm/semaphore.h | 99
-rw-r--r--  include/asm-avr32/semaphore.h | 109
-rw-r--r--  include/asm-blackfin/semaphore-helper.h | 82
-rw-r--r--  include/asm-blackfin/semaphore.h | 106
-rw-r--r--  include/asm-cris/semaphore-helper.h | 78
-rw-r--r--  include/asm-cris/semaphore.h | 134
-rw-r--r--  include/asm-frv/semaphore.h | 156
-rw-r--r--  include/asm-h8300/semaphore-helper.h | 85
-rw-r--r--  include/asm-h8300/semaphore.h | 191
-rw-r--r--  include/asm-ia64/semaphore.h | 100
-rw-r--r--  include/asm-m32r/semaphore.h | 145
-rw-r--r--  include/asm-m68k/semaphore-helper.h | 142
-rw-r--r--  include/asm-m68k/semaphore.h | 164
-rw-r--r--  include/asm-m68knommu/semaphore-helper.h | 82
-rw-r--r--  include/asm-m68knommu/semaphore.h | 154
-rw-r--r--  include/asm-mips/semaphore.h | 109
-rw-r--r--  include/asm-mn10300/semaphore.h | 170
-rw-r--r--  include/asm-parisc/semaphore-helper.h | 89
-rw-r--r--  include/asm-parisc/semaphore.h | 146
-rw-r--r--  include/asm-powerpc/semaphore.h | 95
-rw-r--r--  include/asm-s390/semaphore.h | 108
-rw-r--r--  include/asm-sh/semaphore-helper.h | 89
-rw-r--r--  include/asm-sh/semaphore.h | 116
-rw-r--r--  include/asm-sparc/semaphore.h | 193
-rw-r--r--  include/asm-sparc64/semaphore.h | 54
-rw-r--r--  include/asm-um/semaphore.h | 7
-rw-r--r--  include/asm-v850/semaphore.h | 85
-rw-r--r--  include/asm-x86/semaphore.h | 6
-rw-r--r--  include/asm-x86/semaphore_32.h | 175
-rw-r--r--  include/asm-x86/semaphore_64.h | 180
-rw-r--r--  include/asm-xtensa/semaphore.h | 100
-rw-r--r--  include/linux/semaphore.h | 77
-rw-r--r--  kernel/Makefile | 2
-rw-r--r--  kernel/semaphore.c | 187
-rw-r--r--  lib/Makefile | 1
-rw-r--r--  lib/semaphore-sleepers.c | 176

113 files changed, 314 insertions, 7679 deletions
diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile
index dccf05245d4d..ac706c1d7ada 100644
--- a/arch/alpha/kernel/Makefile
+++ b/arch/alpha/kernel/Makefile
@@ -7,7 +7,7 @@ EXTRA_AFLAGS := $(KBUILD_CFLAGS)
 EXTRA_CFLAGS := -Werror -Wno-sign-compare
 
 obj-y    := entry.o traps.o process.o init_task.o osf_sys.o irq.o \
-	    irq_alpha.o signal.o setup.o ptrace.o time.o semaphore.o \
+	    irq_alpha.o signal.o setup.o ptrace.o time.o \
 	    alpha_ksyms.o systbls.o err_common.o io.o
 
 obj-$(CONFIG_VGA_HOSE)	+= console.o
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c
index e9762a33b043..d96e742d4dc2 100644
--- a/arch/alpha/kernel/alpha_ksyms.c
+++ b/arch/alpha/kernel/alpha_ksyms.c
@@ -77,15 +77,6 @@ EXPORT_SYMBOL(__do_clear_user);
 EXPORT_SYMBOL(__strncpy_from_user);
 EXPORT_SYMBOL(__strnlen_user);
 
-/* Semaphore helper functions. */
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_failed_interruptible);
-EXPORT_SYMBOL(__up_wakeup);
-EXPORT_SYMBOL(down);
-EXPORT_SYMBOL(down_interruptible);
-EXPORT_SYMBOL(down_trylock);
-EXPORT_SYMBOL(up);
-
 /*
  * SMP-specific symbols.
  */
diff --git a/arch/alpha/kernel/semaphore.c b/arch/alpha/kernel/semaphore.c
deleted file mode 100644
index 8d2982aa1b8d..000000000000
--- a/arch/alpha/kernel/semaphore.c
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * Alpha semaphore implementation.
- *
- * (C) Copyright 1996 Linus Torvalds
- * (C) Copyright 1999, 2000 Richard Henderson
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-
-/*
- * This is basically the PPC semaphore scheme ported to use
- * the Alpha ll/sc sequences, so see the PPC code for
- * credits.
- */
-
-/*
- * Atomically update sem->count.
- * This does the equivalent of the following:
- *
- *	old_count = sem->count;
- *	tmp = MAX(old_count, 0) + incr;
- *	sem->count = tmp;
- *	return old_count;
- */
-static inline int __sem_update_count(struct semaphore *sem, int incr)
-{
-	long old_count, tmp = 0;
-
-	__asm__ __volatile__(
-	"1:	ldl_l	%0,%2\n"
-	"	cmovgt	%0,%0,%1\n"
-	"	addl	%1,%3,%1\n"
-	"	stl_c	%1,%2\n"
-	"	beq	%1,2f\n"
-	"	mb\n"
-	".subsection 2\n"
-	"2:	br	1b\n"
-	".previous"
-	: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
-	: "Ir" (incr), "1" (tmp), "m" (sem->count));
-
-	return old_count;
-}
-
-/*
- * Perform the "down" function.  Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from down, the return is ignored and the wait loop is
- * not interruptible.  This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from down_interruptible, the return value gets checked
- * upon return.  If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- */
-
-void __sched
-__down_failed(struct semaphore *sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down failed(%p)\n",
-	       tsk->comm, task_pid_nr(tsk), sem);
-#endif
-
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	wmb();
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	/*
-	 * Try to get the semaphore.  If the count is > 0, then we've
-	 * got the semaphore; we decrement count and exit the loop.
-	 * If the count is 0 or negative, we set it to -1, indicating
-	 * that we are asleep, and then sleep.
-	 */
-	while (__sem_update_count(sem, -1) <= 0) {
-		schedule();
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-	}
-	remove_wait_queue(&sem->wait, &wait);
-	tsk->state = TASK_RUNNING;
-
-	/*
-	 * If there are any more sleepers, wake one of them up so
-	 * that it can either get the semaphore, or set count to -1
-	 * indicating that there are still processes sleeping.
-	 */
-	wake_up(&sem->wait);
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down acquired(%p)\n",
-	       tsk->comm, task_pid_nr(tsk), sem);
-#endif
-}
-
-int __sched
-__down_failed_interruptible(struct semaphore *sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	long ret = 0;
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down failed(%p)\n",
-	       tsk->comm, task_pid_nr(tsk), sem);
-#endif
-
-	tsk->state = TASK_INTERRUPTIBLE;
-	wmb();
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	while (__sem_update_count(sem, -1) <= 0) {
-		if (signal_pending(current)) {
-			/*
-			 * A signal is pending - give up trying.
-			 * Set sem->count to 0 if it is negative,
-			 * since we are no longer sleeping.
-			 */
-			__sem_update_count(sem, 0);
-			ret = -EINTR;
-			break;
-		}
-		schedule();
-		set_task_state(tsk, TASK_INTERRUPTIBLE);
-	}
-
-	remove_wait_queue(&sem->wait, &wait);
-	tsk->state = TASK_RUNNING;
-	wake_up(&sem->wait);
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down %s(%p)\n",
-	       current->comm, task_pid_nr(current),
-	       (ret < 0 ? "interrupted" : "acquired"), sem);
-#endif
-	return ret;
-}
-
-void
-__up_wakeup(struct semaphore *sem)
-{
-	/*
-	 * Note that we incremented count in up() before we came here,
-	 * but that was ineffective since the result was <= 0, and
-	 * any negative value of count is equivalent to 0.
-	 * This ends up setting count to 1, unless count is now > 0
-	 * (i.e. because some other cpu has called up() in the meantime),
-	 * in which case we just increment count.
-	 */
-	__sem_update_count(sem, 1);
-	wake_up(&sem->wait);
-}
-
-void __sched
-down(struct semaphore *sem)
-{
-#ifdef WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down(%p) <count=%d> from %p\n",
-	       current->comm, task_pid_nr(current), sem,
-	       atomic_read(&sem->count), __builtin_return_address(0));
-#endif
-	__down(sem);
-}
-
-int __sched
-down_interruptible(struct semaphore *sem)
-{
-#ifdef WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down(%p) <count=%d> from %p\n",
-	       current->comm, task_pid_nr(current), sem,
-	       atomic_read(&sem->count), __builtin_return_address(0));
-#endif
-	return __down_interruptible(sem);
-}
-
-int
-down_trylock(struct semaphore *sem)
-{
-	int ret;
-
-#ifdef WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
-
-	ret = __down_trylock(sem);
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down_trylock %s from %p\n",
-	       current->comm, task_pid_nr(current),
-	       ret ? "failed" : "acquired",
-	       __builtin_return_address(0));
-#endif
-
-	return ret;
-}
-
-void
-up(struct semaphore *sem)
-{
-#ifdef WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): up(%p) <count=%d> from %p\n",
-	       current->comm, task_pid_nr(current), sem,
-	       atomic_read(&sem->count), __builtin_return_address(0));
-#endif
-	__up(sem);
-}
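The ll/sc asm in __sem_update_count() above is compact but opaque; a
hypothetical portable C equivalent (illustrative only — the helper name
and the atomic_cmpxchg() formulation are not part of this patch) would
be:

	/* Atomically: old = count; count = MAX(old, 0) + incr; return old. */
	static inline int sem_update_count(atomic_t *count, int incr)
	{
		int old, new;

		do {
			old = atomic_read(count);
			new = (old > 0 ? old : 0) + incr;	/* MAX(old, 0) + incr */
		} while (atomic_cmpxchg(count, old, new) != old);

		return old;
	}

Clamping at zero before adding is what lets any negative count stand for
"waiters present" without tracking how many there are.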
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 00d44c6fbfe9..6235f72a14f0 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -7,7 +7,7 @@ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
 # Object file lists.
 
 obj-y		:= compat.o entry-armv.o entry-common.o irq.o \
-		   process.o ptrace.o semaphore.o setup.o signal.o \
+		   process.o ptrace.o setup.o signal.o \
 		   sys_arm.o stacktrace.o time.o traps.o
 
 obj-$(CONFIG_ISA_DMA_API)	+= dma.o
diff --git a/arch/arm/kernel/semaphore.c b/arch/arm/kernel/semaphore.c
deleted file mode 100644
index 981fe5c6ccbe..000000000000
--- a/arch/arm/kernel/semaphore.c
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * ARM semaphore implementation, taken from
- *
- * i386 semaphore implementation.
- *
- * (C) Copyright 1999 Linus Torvalds
- *
- * Modified for ARM by Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-
-#include <asm/semaphore.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * "sleeping" and the contention routine ordering is
- * protected by the semaphore spinlock.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- *  - only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - when we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleeper" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-void __up(struct semaphore *sem)
-{
-	wake_up(&sem->wait);
-}
-
-static DEFINE_SPINLOCK(semaphore_lock);
-
-void __sched __down(struct semaphore * sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	spin_lock_irq(&semaphore_lock);
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irq(&semaphore_lock);
-
-		schedule();
-		tsk->state = TASK_UNINTERRUPTIBLE;
-		spin_lock_irq(&semaphore_lock);
-	}
-	spin_unlock_irq(&semaphore_lock);
-	remove_wait_queue(&sem->wait, &wait);
-	tsk->state = TASK_RUNNING;
-	wake_up(&sem->wait);
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	int retval = 0;
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	tsk->state = TASK_INTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	spin_lock_irq(&semaphore_lock);
-	sem->sleepers ++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * With signals pending, this turns into
-		 * the trylock failure case - we won't be
-		 * sleeping, and we* can't get the lock as
-		 * it has contention. Just correct the count
-		 * and exit.
-		 */
-		if (signal_pending(current)) {
-			retval = -EINTR;
-			sem->sleepers = 0;
-			atomic_add(sleepers, &sem->count);
-			break;
-		}
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock. The
-		 * "-1" is because we're still hoping to get
-		 * the lock.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irq(&semaphore_lock);
-
-		schedule();
-		tsk->state = TASK_INTERRUPTIBLE;
-		spin_lock_irq(&semaphore_lock);
-	}
-	spin_unlock_irq(&semaphore_lock);
-	tsk->state = TASK_RUNNING;
-	remove_wait_queue(&sem->wait, &wait);
-	wake_up(&sem->wait);
-	return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for
- * having decremented the count.
- *
- * We could have done the trylock with a
- * single "cmpxchg" without failure cases,
- * but then it wouldn't work on a 386.
- */
-int __down_trylock(struct semaphore * sem)
-{
-	int sleepers;
-	unsigned long flags;
-
-	spin_lock_irqsave(&semaphore_lock, flags);
-	sleepers = sem->sleepers + 1;
-	sem->sleepers = 0;
-
-	/*
-	 * Add "everybody else" and us into it. They aren't
-	 * playing, because we own the spinlock.
-	 */
-	if (!atomic_add_negative(sleepers, &sem->count))
-		wake_up(&sem->wait);
-
-	spin_unlock_irqrestore(&semaphore_lock, flags);
-	return 1;
-}
-
-/*
- * The semaphore operations have a special calling sequence that
- * allow us to do a simpler in-line version of them. These routines
- * need to convert that sequence back into the C sequence when
- * there is contention on the semaphore.
- *
- * ip contains the semaphore pointer on entry. Save the C-clobbered
- * registers (r0 to r3 and lr), but not ip, as we use it as a return
- * value in some cases..
- * To remain AAPCS compliant (64-bit stack align) we save r4 as well.
- */
-asm("	.section .sched.text,\"ax\",%progbits	\n\
-	.align	5				\n\
-	.globl	__down_failed			\n\
-__down_failed:					\n\
-	stmfd	sp!, {r0 - r4, lr}		\n\
-	mov	r0, ip				\n\
-	bl	__down				\n\
-	ldmfd	sp!, {r0 - r4, pc}		\n\
-						\n\
-	.align	5				\n\
-	.globl	__down_interruptible_failed	\n\
-__down_interruptible_failed:			\n\
-	stmfd	sp!, {r0 - r4, lr}		\n\
-	mov	r0, ip				\n\
-	bl	__down_interruptible		\n\
-	mov	ip, r0				\n\
-	ldmfd	sp!, {r0 - r4, pc}		\n\
-						\n\
-	.align	5				\n\
-	.globl	__down_trylock_failed		\n\
-__down_trylock_failed:				\n\
-	stmfd	sp!, {r0 - r4, lr}		\n\
-	mov	r0, ip				\n\
-	bl	__down_trylock			\n\
-	mov	ip, r0				\n\
-	ldmfd	sp!, {r0 - r4, pc}		\n\
-						\n\
-	.align	5				\n\
-	.globl	__up_wakeup			\n\
-__up_wakeup:					\n\
-	stmfd	sp!, {r0 - r4, lr}		\n\
-	mov	r0, ip				\n\
-	bl	__up				\n\
-	ldmfd	sp!, {r0 - r4, pc}		\n\
-	");
-
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_interruptible_failed);
-EXPORT_SYMBOL(__down_trylock_failed);
-EXPORT_SYMBOL(__up_wakeup);
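The "sleepers" accounting in this i386-derived scheme (also used by the
avr32, ia64 and m32r files below) is easiest to see in a worked trace.
The following is illustrative commentary, not text from the patch, for a
semaphore whose count starts at 1 with two contending tasks:

	/*
	 *   A: down()   count 1 -> 0, non-negative: A acquires, no __down().
	 *   B: down()   count 0 -> -1, negative: B enters __down(), sleepers = 1.
	 *   B: loop     atomic_add_negative(sleepers - 1 = 0, &count) leaves
	 *               count at -1, still negative: B sets sleepers = 1, sleeps.
	 *   A: up()     count -1 -> 0; the old value was negative, so the
	 *               slow path wakes B.
	 *   B: loop     atomic_add_negative(0, &count) sees 0, non-negative:
	 *               B sets sleepers = 0 and owns the semaphore (count 0).
	 */

The invariant is that while anyone sleeps, count stays at -1 and
"sleepers" records the extra decrements to fold back in — which is
exactly the bookkeeping the generic implementation makes unnecessary.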
diff --git a/arch/avr32/kernel/Makefile b/arch/avr32/kernel/Makefile
index e4b6d122b033..18229d0d1861 100644
--- a/arch/avr32/kernel/Makefile
+++ b/arch/avr32/kernel/Makefile
@@ -6,7 +6,7 @@ extra-y := head.o vmlinux.lds
 
 obj-$(CONFIG_SUBARCH_AVR32B)	+= entry-avr32b.o
 obj-y				+= syscall_table.o syscall-stubs.o irq.o
-obj-y				+= setup.o traps.o semaphore.o ocd.o ptrace.o
+obj-y				+= setup.o traps.o ocd.o ptrace.o
 obj-y				+= signal.o sys_avr32.o process.o time.o
 obj-y				+= init_task.o switch_to.o cpu.o
 obj-$(CONFIG_MODULES)		+= module.o avr32_ksyms.o
diff --git a/arch/avr32/kernel/semaphore.c b/arch/avr32/kernel/semaphore.c
deleted file mode 100644
index 1e2705a05016..000000000000
--- a/arch/avr32/kernel/semaphore.c
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * AVR32 sempahore implementation.
- *
- * Copyright (C) 2004-2006 Atmel Corporation
- *
- * Based on linux/arch/i386/kernel/semaphore.c
- *  Copyright (C) 1999 Linus Torvalds
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-
-#include <asm/semaphore.h>
-#include <asm/atomic.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * "sleeping" and the contention routine ordering is protected
- * by the spinlock in the semaphore's waitqueue head.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- *  - only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - when we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleeper" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-void __up(struct semaphore *sem)
-{
-	wake_up(&sem->wait);
-}
-EXPORT_SYMBOL(__up);
-
-void __sched __down(struct semaphore *sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	unsigned long flags;
-
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock in
-		 * the wait_queue_head.
-		 */
-		if (atomic_add_return(sleepers - 1, &sem->count) >= 0) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-		schedule();
-
-		spin_lock_irqsave(&sem->wait.lock, flags);
-		tsk->state = TASK_UNINTERRUPTIBLE;
-	}
-	remove_wait_queue_locked(&sem->wait, &wait);
-	wake_up_locked(&sem->wait);
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-	tsk->state = TASK_RUNNING;
-}
-EXPORT_SYMBOL(__down);
-
-int __sched __down_interruptible(struct semaphore *sem)
-{
-	int retval = 0;
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	unsigned long flags;
-
-	tsk->state = TASK_INTERRUPTIBLE;
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * With signals pending, this turns into the trylock
-		 * failure case - we won't be sleeping, and we can't
-		 * get the lock as it has contention. Just correct the
-		 * count and exit.
-		 */
-		if (signal_pending(current)) {
-			retval = -EINTR;
-			sem->sleepers = 0;
-			atomic_add(sleepers, &sem->count);
-			break;
-		}
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock in
-		 * the wait_queue_head.
-		 */
-		if (atomic_add_return(sleepers - 1, &sem->count) >= 0) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-		schedule();
-
-		spin_lock_irqsave(&sem->wait.lock, flags);
-		tsk->state = TASK_INTERRUPTIBLE;
-	}
-	remove_wait_queue_locked(&sem->wait, &wait);
-	wake_up_locked(&sem->wait);
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-	tsk->state = TASK_RUNNING;
-	return retval;
-}
-EXPORT_SYMBOL(__down_interruptible);
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 589c6aca4803..2dd1f300a5cf 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -31,10 +31,6 @@ config ZONE_DMA
 	bool
 	default y
 
-config SEMAPHORE_SLEEPERS
-	bool
-	default y
-
 config GENERIC_FIND_NEXT_BIT
 	bool
 	default y
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index 0bfbb269e350..053edff6c0d8 100644
--- a/arch/blackfin/kernel/bfin_ksyms.c
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -42,11 +42,6 @@ EXPORT_SYMBOL(ip_fast_csum);
 
 EXPORT_SYMBOL(kernel_thread);
 
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__down_interruptible);
-
 EXPORT_SYMBOL(is_in_rom);
 EXPORT_SYMBOL(bfin_return_from_exception);
 
diff --git a/arch/cris/kernel/Makefile b/arch/cris/kernel/Makefile
index c8e8ea570989..ee7bcd4d20b2 100644
--- a/arch/cris/kernel/Makefile
+++ b/arch/cris/kernel/Makefile
@@ -5,8 +5,7 @@
 
 extra-y	:= vmlinux.lds
 
-obj-y   := process.o traps.o irq.o ptrace.o setup.o \
-	   time.o sys_cris.o semaphore.o
+obj-y   := process.o traps.o irq.o ptrace.o setup.o time.o sys_cris.o
 
 obj-$(CONFIG_MODULES)    += crisksyms.o
 obj-$(CONFIG_MODULES)    += module.o
diff --git a/arch/cris/kernel/crisksyms.c b/arch/cris/kernel/crisksyms.c
index 62f0e752915a..7ac000f6a888 100644
--- a/arch/cris/kernel/crisksyms.c
+++ b/arch/cris/kernel/crisksyms.c
@@ -9,7 +9,6 @@
 #include <linux/string.h>
 #include <linux/tty.h>
 
-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/checksum.h>
@@ -49,12 +48,6 @@ EXPORT_SYMBOL(__negdi2);
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
 
-/* Semaphore functions */
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down_trylock);
-
 /* Userspace access functions */
 EXPORT_SYMBOL(__copy_user_zeroing);
 EXPORT_SYMBOL(__copy_user);
diff --git a/arch/cris/kernel/semaphore.c b/arch/cris/kernel/semaphore.c
deleted file mode 100644
index f137a439041f..000000000000
--- a/arch/cris/kernel/semaphore.c
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Generic semaphore code. Buyer beware. Do your own
- * specific changes in <asm/semaphore-helper.h>
- */
-
-#include <linux/sched.h>
-#include <asm/semaphore-helper.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to sleep, while the "waking" variable is
- * incremented when the "up()" code goes to wake up waiting
- * processes.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * waking_non_zero() (from asm/semaphore.h) must execute
- * atomically.
- *
- * When __up() is called, the count was negative before
- * incrementing it, and we need to wake up somebody.
- *
- * This routine adds one to the count of processes that need to
- * wake up and exit.  ALL waiting processes actually wake up but
- * only the one that gets to the "waking" field first will gate
- * through and acquire the semaphore.  The others will go back
- * to sleep.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-void __up(struct semaphore *sem)
-{
-	wake_one_more(sem);
-	wake_up(&sem->wait);
-}
-
-/*
- * Perform the "down" function.  Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from __down, the return is ignored and the wait loop is
- * not interruptible.  This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from __down_interruptible, the return value gets checked
- * upon return.  If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- *
- */
-
-#define DOWN_VAR				\
-	struct task_struct *tsk = current;	\
-	wait_queue_t wait;			\
-	init_waitqueue_entry(&wait, tsk);
-
-#define DOWN_HEAD(task_state)						\
-									\
-									\
-	tsk->state = (task_state);					\
-	add_wait_queue(&sem->wait, &wait);				\
-									\
-	/*								\
-	 * Ok, we're set up.  sem->count is known to be less than zero	\
-	 * so we must wait.						\
-	 *								\
-	 * We can let go the lock for purposes of waiting.		\
-	 * We re-acquire it after awaking so as to protect		\
-	 * all semaphore operations.					\
-	 *								\
-	 * If "up()" is called before we call waking_non_zero() then	\
-	 * we will catch it right away.  If it is called later then	\
-	 * we will have to go through a wakeup cycle to catch it.	\
-	 *								\
-	 * Multiple waiters contend for the semaphore lock to see	\
-	 * who gets to gate through and who has to wait some more.	\
-	 */								\
-	for (;;) {
-
-#define DOWN_TAIL(task_state)			\
-		tsk->state = (task_state);	\
-	}					\
-	tsk->state = TASK_RUNNING;		\
-	remove_wait_queue(&sem->wait, &wait);
-
-void __sched __down(struct semaphore * sem)
-{
-	DOWN_VAR
-	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
-	if (waking_non_zero(sem))
-		break;
-	schedule();
-	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	int ret = 0;
-	DOWN_VAR
-	DOWN_HEAD(TASK_INTERRUPTIBLE)
-
-	ret = waking_non_zero_interruptible(sem, tsk);
-	if (ret)
-	{
-		if (ret == 1)
-			/* ret != 0 only if we get interrupted -arca */
-			ret = 0;
-		break;
-	}
-	schedule();
-	DOWN_TAIL(TASK_INTERRUPTIBLE)
-	return ret;
-}
-
-int __down_trylock(struct semaphore * sem)
-{
-	return waking_non_zero_trylock(sem);
-}
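The "waking" contract this generic code relies on lives in the per-arch
semaphore-helper.h headers, which this patch also deletes. A minimal
sketch of that contract, assuming a plain integer sem->waking field
guarded by a global spinlock (an assumption for illustration, not the
deleted headers verbatim):

	/* up() banks one wakeup token ... */
	static inline void wake_one_more(struct semaphore *sem)
	{
		unsigned long flags;

		spin_lock_irqsave(&semaphore_wake_lock, flags);
		sem->waking++;			/* bank one wakeup token */
		spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	}

	/* ... and each woken waiter tries to consume exactly one. */
	static inline int waking_non_zero(struct semaphore *sem)
	{
		unsigned long flags;
		int ret = 0;

		spin_lock_irqsave(&semaphore_wake_lock, flags);
		if (sem->waking > 0) {		/* first waiter here gates through */
			sem->waking--;
			ret = 1;
		}
		spin_unlock_irqrestore(&semaphore_wake_lock, flags);
		return ret;
	}

All waiters wake, but only the one that claims the token acquires the
semaphore; the rest loop back to sleep, as the comment above describes.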
diff --git a/arch/frv/kernel/Makefile b/arch/frv/kernel/Makefile
index e8f73ed28b52..c36f70b6699a 100644
--- a/arch/frv/kernel/Makefile
+++ b/arch/frv/kernel/Makefile
@@ -9,7 +9,7 @@ extra-y:= head.o init_task.o vmlinux.lds
 
 obj-y := $(heads-y) entry.o entry-table.o break.o switch_to.o kernel_thread.o \
	 kernel_execve.o process.o traps.o ptrace.o signal.o dma.o \
-	 sys_frv.o time.o semaphore.o setup.o frv_ksyms.o \
+	 sys_frv.o time.o setup.o frv_ksyms.o \
	 debug-stub.o irq.o sleep.o uaccess.o
 
 obj-$(CONFIG_GDBSTUB) += gdb-stub.o gdb-io.o
diff --git a/arch/frv/kernel/frv_ksyms.c b/arch/frv/kernel/frv_ksyms.c
index f772704b3d28..0316b3c50eff 100644
--- a/arch/frv/kernel/frv_ksyms.c
+++ b/arch/frv/kernel/frv_ksyms.c
@@ -12,7 +12,6 @@
 #include <asm/pgalloc.h>
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <asm/semaphore.h>
 #include <asm/checksum.h>
 #include <asm/hardirq.h>
 #include <asm/cacheflush.h>
diff --git a/arch/frv/kernel/semaphore.c b/arch/frv/kernel/semaphore.c
deleted file mode 100644
index 7ee3a147b471..000000000000
--- a/arch/frv/kernel/semaphore.c
+++ /dev/null
@@ -1,155 +0,0 @@
-/* semaphore.c: FR-V semaphores
- *
- * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- * - Derived from lib/rwsem-spinlock.c
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/sched.h>
-#include <linux/module.h>
-#include <asm/semaphore.h>
-
-struct sem_waiter {
-	struct list_head	list;
-	struct task_struct	*task;
-};
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-void semtrace(struct semaphore *sem, const char *str)
-{
-	if (sem->debug)
-		printk("[%d] %s({%d,%d})\n",
-		       current->pid,
-		       str,
-		       sem->counter,
-		       list_empty(&sem->wait_list) ? 0 : 1);
-}
-#else
-#define semtrace(SEM,STR) do { } while(0)
-#endif
-
-/*
- * wait for a token to be granted from a semaphore
- * - entered with lock held and interrupts disabled
- */
-void __down(struct semaphore *sem, unsigned long flags)
-{
-	struct task_struct *tsk = current;
-	struct sem_waiter waiter;
-
-	semtrace(sem, "Entering __down");
-
-	/* set up my own style of waitqueue */
-	waiter.task = tsk;
-	get_task_struct(tsk);
-
-	list_add_tail(&waiter.list, &sem->wait_list);
-
-	/* we don't need to touch the semaphore struct anymore */
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	/* wait to be given the semaphore */
-	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-
-	for (;;) {
-		if (list_empty(&waiter.list))
-			break;
-		schedule();
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-	}
-
-	tsk->state = TASK_RUNNING;
-	semtrace(sem, "Leaving __down");
-}
-
-EXPORT_SYMBOL(__down);
-
-/*
- * interruptibly wait for a token to be granted from a semaphore
- * - entered with lock held and interrupts disabled
- */
-int __down_interruptible(struct semaphore *sem, unsigned long flags)
-{
-	struct task_struct *tsk = current;
-	struct sem_waiter waiter;
-	int ret;
-
-	semtrace(sem,"Entering __down_interruptible");
-
-	/* set up my own style of waitqueue */
-	waiter.task = tsk;
-	get_task_struct(tsk);
-
-	list_add_tail(&waiter.list, &sem->wait_list);
-
-	/* we don't need to touch the semaphore struct anymore */
-	set_task_state(tsk, TASK_INTERRUPTIBLE);
-
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	/* wait to be given the semaphore */
-	ret = 0;
-	for (;;) {
-		if (list_empty(&waiter.list))
-			break;
-		if (unlikely(signal_pending(current)))
-			goto interrupted;
-		schedule();
-		set_task_state(tsk, TASK_INTERRUPTIBLE);
-	}
-
- out:
-	tsk->state = TASK_RUNNING;
-	semtrace(sem, "Leaving __down_interruptible");
-	return ret;
-
- interrupted:
-	spin_lock_irqsave(&sem->wait_lock, flags);
-
-	if (!list_empty(&waiter.list)) {
-		list_del(&waiter.list);
-		ret = -EINTR;
-	}
-
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
-	if (ret == -EINTR)
-		put_task_struct(current);
-	goto out;
-}
-
-EXPORT_SYMBOL(__down_interruptible);
-
-/*
- * release a single token back to a semaphore
- * - entered with lock held and interrupts disabled
- */
-void __up(struct semaphore *sem)
-{
-	struct task_struct *tsk;
-	struct sem_waiter *waiter;
-
-	semtrace(sem,"Entering __up");
-
-	/* grant the token to the process at the front of the queue */
-	waiter = list_entry(sem->wait_list.next, struct sem_waiter, list);
-
-	/* We must be careful not to touch 'waiter' after we set ->task = NULL.
-	 * It is allocated on the waiter's stack and may become invalid at
-	 * any time after that point (due to a wakeup from another source).
-	 */
-	list_del_init(&waiter->list);
-	tsk = waiter->task;
-	mb();
-	waiter->task = NULL;
-	wake_up_process(tsk);
-	put_task_struct(tsk);
-
-	semtrace(sem,"Leaving __up");
-}
-
-EXPORT_SYMBOL(__up);
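The FR-V slow paths above are "entered with lock held and interrupts
disabled", which implies a fast path in the (also-deleted)
include/asm-frv/semaphore.h that takes sem->wait_lock first. A hedged
sketch of what such a fast path looks like, assumed from the field names
(sem->counter, sem->wait_lock) visible in the code above rather than
quoted from the deleted header:

	static inline void down(struct semaphore *sem)
	{
		unsigned long flags;

		spin_lock_irqsave(&sem->wait_lock, flags);
		if (likely(sem->counter > 0)) {
			sem->counter--;
			spin_unlock_irqrestore(&sem->wait_lock, flags);
		} else {
			__down(sem, flags);	/* drops wait_lock, then sleeps */
		}
	}

Passing flags into __down() lets the slow path release the lock itself
once the waiter is safely queued — the same direct-handoff shape the new
generic implementation adopts.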
diff --git a/arch/h8300/kernel/Makefile b/arch/h8300/kernel/Makefile
index 874f6aefee65..6c248c3c5c3b 100644
--- a/arch/h8300/kernel/Makefile
+++ b/arch/h8300/kernel/Makefile
@@ -5,7 +5,7 @@
 extra-y := vmlinux.lds
 
 obj-y := process.o traps.o ptrace.o irq.o \
-	 sys_h8300.o time.o semaphore.o signal.o \
+	 sys_h8300.o time.o signal.o \
	 setup.o gpio.o init_task.o syscalls.o \
	 entry.o
 
diff --git a/arch/h8300/kernel/h8300_ksyms.c b/arch/h8300/kernel/h8300_ksyms.c
index d1b15267ac81..6866bd9c7fb4 100644
--- a/arch/h8300/kernel/h8300_ksyms.c
+++ b/arch/h8300/kernel/h8300_ksyms.c
@@ -12,7 +12,6 @@
 #include <asm/pgalloc.h>
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <asm/semaphore.h>
 #include <asm/checksum.h>
 #include <asm/current.h>
 #include <asm/gpio.h>
diff --git a/arch/h8300/kernel/semaphore.c b/arch/h8300/kernel/semaphore.c
deleted file mode 100644
index d12cbbfe6ebd..000000000000
--- a/arch/h8300/kernel/semaphore.c
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Generic semaphore code. Buyer beware. Do your own
- * specific changes in <asm/semaphore-helper.h>
- */
-
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <asm/semaphore-helper.h>
-
-#ifndef CONFIG_RMW_INSNS
-spinlock_t semaphore_wake_lock;
-#endif
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to sleep, while the "waking" variable is
- * incremented when the "up()" code goes to wake up waiting
- * processes.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * waking_non_zero() (from asm/semaphore.h) must execute
- * atomically.
- *
- * When __up() is called, the count was negative before
- * incrementing it, and we need to wake up somebody.
- *
- * This routine adds one to the count of processes that need to
- * wake up and exit.  ALL waiting processes actually wake up but
- * only the one that gets to the "waking" field first will gate
- * through and acquire the semaphore.  The others will go back
- * to sleep.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-void __up(struct semaphore *sem)
-{
-	wake_one_more(sem);
-	wake_up(&sem->wait);
-}
-
-/*
- * Perform the "down" function.  Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from __down, the return is ignored and the wait loop is
- * not interruptible.  This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from __down_interruptible, the return value gets checked
- * upon return.  If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- *
- */
-
-
-#define DOWN_HEAD(task_state)						\
-									\
-									\
-	current->state = (task_state);					\
-	add_wait_queue(&sem->wait, &wait);				\
-									\
-	/*								\
-	 * Ok, we're set up.  sem->count is known to be less than zero	\
-	 * so we must wait.						\
-	 *								\
-	 * We can let go the lock for purposes of waiting.		\
-	 * We re-acquire it after awaking so as to protect		\
-	 * all semaphore operations.					\
-	 *								\
-	 * If "up()" is called before we call waking_non_zero() then	\
-	 * we will catch it right away.  If it is called later then	\
-	 * we will have to go through a wakeup cycle to catch it.	\
-	 *								\
-	 * Multiple waiters contend for the semaphore lock to see	\
-	 * who gets to gate through and who has to wait some more.	\
-	 */								\
-	for (;;) {
-
-#define DOWN_TAIL(task_state)			\
-	current->state = (task_state);		\
-	}					\
-	current->state = TASK_RUNNING;		\
-	remove_wait_queue(&sem->wait, &wait);
-
-void __sched __down(struct semaphore * sem)
-{
-	DECLARE_WAITQUEUE(wait, current);
-
-	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
-	if (waking_non_zero(sem))
-		break;
-	schedule();
-	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	DECLARE_WAITQUEUE(wait, current);
-	int ret = 0;
-
-	DOWN_HEAD(TASK_INTERRUPTIBLE)
-
-	ret = waking_non_zero_interruptible(sem, current);
-	if (ret)
-	{
-		if (ret == 1)
-			/* ret != 0 only if we get interrupted -arca */
-			ret = 0;
-		break;
-	}
-	schedule();
-	DOWN_TAIL(TASK_INTERRUPTIBLE)
-	return ret;
-}
-
-int __down_trylock(struct semaphore * sem)
-{
-	return waking_non_zero_trylock(sem);
-}
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 33e5a598672d..13fd10e8699e 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -6,7 +6,7 @@ extra-y := head.o init_task.o vmlinux.lds
 
 obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \
	 irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \
-	 salinfo.o semaphore.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
+	 salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
	 unwind.o mca.o mca_asm.o topology.o
 
 obj-$(CONFIG_IA64_BRL_EMU)	+= brl_emu.o
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 8e7193d55528..6da1f20d7372 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -19,12 +19,6 @@ EXPORT_SYMBOL_GPL(empty_zero_page);
 EXPORT_SYMBOL(ip_fast_csum);		/* hand-coded assembly */
 EXPORT_SYMBOL(csum_ipv6_magic);
 
-#include <asm/semaphore.h>
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__up);
-
 #include <asm/page.h>
 EXPORT_SYMBOL(clear_page);
 
diff --git a/arch/ia64/kernel/semaphore.c b/arch/ia64/kernel/semaphore.c
deleted file mode 100644
index 2724ef3fbae2..000000000000
--- a/arch/ia64/kernel/semaphore.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * IA-64 semaphore implementation (derived from x86 version).
- *
- * Copyright (C) 1999-2000, 2002 Hewlett-Packard Co
- *	David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-/*
- * Semaphores are implemented using a two-way counter: The "count"
- * variable is decremented for each process that tries to acquire the
- * semaphore, while the "sleepers" variable is a count of such
- * acquires.
- *
- * Notably, the inline "up()" and "down()" functions can efficiently
- * test if they need to do any extra work (up needs to do something
- * only if count was negative before the increment operation.
- *
- * "sleeping" and the contention routine ordering is protected
- * by the spinlock in the semaphore's waitqueue head.
- *
- * Note that these functions are only called when there is contention
- * on the lock, and as such all this is the "non-critical" part of the
- * whole semaphore business. The critical part is the inline stuff in
- * <asm/semaphore.h> where we want to avoid any extra jumps and calls.
- */
-#include <linux/sched.h>
-#include <linux/init.h>
-
-#include <asm/errno.h>
-#include <asm/semaphore.h>
-
-/*
- * Logic:
- *  - Only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - When we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleepers" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-void
-__up (struct semaphore *sem)
-{
-	wake_up(&sem->wait);
-}
-
-void __sched __down (struct semaphore *sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	unsigned long flags;
-
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock in
-		 * the wait_queue_head.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-		schedule();
-
-		spin_lock_irqsave(&sem->wait.lock, flags);
-		tsk->state = TASK_UNINTERRUPTIBLE;
-	}
-	remove_wait_queue_locked(&sem->wait, &wait);
-	wake_up_locked(&sem->wait);
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-	tsk->state = TASK_RUNNING;
-}
-
-int __sched __down_interruptible (struct semaphore * sem)
-{
-	int retval = 0;
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	unsigned long flags;
-
-	tsk->state = TASK_INTERRUPTIBLE;
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-	sem->sleepers ++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * With signals pending, this turns into
-		 * the trylock failure case - we won't be
-		 * sleeping, and we* can't get the lock as
-		 * it has contention. Just correct the count
-		 * and exit.
-		 */
-		if (signal_pending(current)) {
-			retval = -EINTR;
-			sem->sleepers = 0;
-			atomic_add(sleepers, &sem->count);
-			break;
-		}
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock in
-		 * wait_queue_head. The "-1" is because we're
-		 * still hoping to get the semaphore.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-		schedule();
-
-		spin_lock_irqsave(&sem->wait.lock, flags);
-		tsk->state = TASK_INTERRUPTIBLE;
-	}
-	remove_wait_queue_locked(&sem->wait, &wait);
-	wake_up_locked(&sem->wait);
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-	tsk->state = TASK_RUNNING;
-	return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for having decremented the
- * count.
- */
-int
-__down_trylock (struct semaphore *sem)
-{
-	unsigned long flags;
-	int sleepers;
-
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	sleepers = sem->sleepers + 1;
-	sem->sleepers = 0;
-
-	/*
-	 * Add "everybody else" and us into it. They aren't
-	 * playing, because we own the spinlock in the
-	 * wait_queue_head.
-	 */
-	if (!atomic_add_negative(sleepers, &sem->count)) {
-		wake_up_locked(&sem->wait);
-	}
-
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-	return 1;
-}
diff --git a/arch/m32r/kernel/Makefile b/arch/m32r/kernel/Makefile
index e97e26e87c9e..09200d4886e3 100644
--- a/arch/m32r/kernel/Makefile
+++ b/arch/m32r/kernel/Makefile
@@ -5,7 +5,7 @@
 extra-y	:= head.o init_task.o vmlinux.lds
 
 obj-y	:= process.o entry.o traps.o align.o irq.o setup.o time.o \
-	m32r_ksyms.o sys_m32r.o semaphore.o signal.o ptrace.o
+	m32r_ksyms.o sys_m32r.o signal.o ptrace.o
 
 obj-$(CONFIG_SMP)		+= smp.o smpboot.o
 obj-$(CONFIG_MODULES)		+= module.o
diff --git a/arch/m32r/kernel/m32r_ksyms.c b/arch/m32r/kernel/m32r_ksyms.c
index 41a4c95e06d6..e6709fe950ba 100644
--- a/arch/m32r/kernel/m32r_ksyms.c
+++ b/arch/m32r/kernel/m32r_ksyms.c
@@ -7,7 +7,6 @@
 #include <linux/interrupt.h>
 #include <linux/string.h>
 
-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/checksum.h>
@@ -22,10 +21,6 @@ EXPORT_SYMBOL(dump_fpu);
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
 EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down_trylock);
 
 /* Networking helper routines. */
 /* Delay loops */
diff --git a/arch/m32r/kernel/semaphore.c b/arch/m32r/kernel/semaphore.c
deleted file mode 100644
index 940c2d37cfd1..000000000000
--- a/arch/m32r/kernel/semaphore.c
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * linux/arch/m32r/semaphore.c
- *  orig : i386 2.6.4
- *
- * M32R semaphore implementation.
- *
- * Copyright (c) 2002 - 2004 Hitoshi Yamamoto
- */
-
-/*
- * i386 semaphore implementation.
- *
- * (C) Copyright 1999 Linus Torvalds
- *
- * Portions Copyright 1999 Red Hat, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
- */
-#include <linux/sched.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <asm/semaphore.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * "sleeping" and the contention routine ordering is protected
- * by the spinlock in the semaphore's waitqueue head.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- *  - only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - when we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleeper" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-asmlinkage void __up(struct semaphore *sem)
-{
-	wake_up(&sem->wait);
-}
-
-asmlinkage void __sched __down(struct semaphore * sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	unsigned long flags;
-
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock in
-		 * the wait_queue_head.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-		schedule();
-
-		spin_lock_irqsave(&sem->wait.lock, flags);
-		tsk->state = TASK_UNINTERRUPTIBLE;
-	}
-	remove_wait_queue_locked(&sem->wait, &wait);
-	wake_up_locked(&sem->wait);
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-	tsk->state = TASK_RUNNING;
-}
-
-asmlinkage int __sched __down_interruptible(struct semaphore * sem)
-{
-	int retval = 0;
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	unsigned long flags;
-
-	tsk->state = TASK_INTERRUPTIBLE;
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * With signals pending, this turns into
-		 * the trylock failure case - we won't be
-		 * sleeping, and we* can't get the lock as
-		 * it has contention. Just correct the count
-		 * and exit.
-		 */
-		if (signal_pending(current)) {
-			retval = -EINTR;
-			sem->sleepers = 0;
-			atomic_add(sleepers, &sem->count);
-			break;
-		}
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock in
-		 * wait_queue_head. The "-1" is because we're
-		 * still hoping to get the semaphore.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-		schedule();
-
-		spin_lock_irqsave(&sem->wait.lock, flags);
-		tsk->state = TASK_INTERRUPTIBLE;
-	}
-	remove_wait_queue_locked(&sem->wait, &wait);
-	wake_up_locked(&sem->wait);
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-	tsk->state = TASK_RUNNING;
-	return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for
- * having decremented the count.
- *
- * We could have done the trylock with a
- * single "cmpxchg" without failure cases,
- * but then it wouldn't work on a 386.
- */
-asmlinkage int __down_trylock(struct semaphore * sem)
-{
-	int sleepers;
-	unsigned long flags;
-
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	sleepers = sem->sleepers + 1;
-	sem->sleepers = 0;
-
-	/*
-	 * Add "everybody else" and us into it. They aren't
-	 * playing, because we own the spinlock in the
-	 * wait_queue_head.
-	 */
-	if (!atomic_add_negative(sleepers, &sem->count)) {
-		wake_up_locked(&sem->wait);
-	}
-
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-	return 1;
-}
diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile
index a806208c7fb5..7a62a718143b 100644
--- a/arch/m68k/kernel/Makefile
+++ b/arch/m68k/kernel/Makefile
@@ -10,7 +10,7 @@ endif
 extra-y += vmlinux.lds
 
 obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o module.o \
-	   sys_m68k.o time.o semaphore.o setup.o m68k_ksyms.o devres.o
+	   sys_m68k.o time.o setup.o m68k_ksyms.o devres.o
 
 devres-y = ../../../kernel/irq/devres.o
 
diff --git a/arch/m68k/kernel/m68k_ksyms.c b/arch/m68k/kernel/m68k_ksyms.c
index 6fc69c74fe2e..d900e77e5363 100644
--- a/arch/m68k/kernel/m68k_ksyms.c
+++ b/arch/m68k/kernel/m68k_ksyms.c
@@ -1,5 +1,4 @@
 #include <linux/module.h>
-#include <asm/semaphore.h>
 
 asmlinkage long long __ashldi3 (long long, int);
 asmlinkage long long __ashrdi3 (long long, int);
@@ -15,8 +14,3 @@ EXPORT_SYMBOL(__ashrdi3);
 EXPORT_SYMBOL(__lshrdi3);
 EXPORT_SYMBOL(__muldi3);
 
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_failed_interruptible);
-EXPORT_SYMBOL(__down_failed_trylock);
-EXPORT_SYMBOL(__up_wakeup);
-
diff --git a/arch/m68k/kernel/semaphore.c b/arch/m68k/kernel/semaphore.c
deleted file mode 100644
index d12cbbfe6ebd..000000000000
--- a/arch/m68k/kernel/semaphore.c
+++ /dev/null
@@ -1,132 +0,0 @@
1/*
2 * Generic semaphore code. Buyer beware. Do your own
3 * specific changes in <asm/semaphore-helper.h>
4 */
5
6#include <linux/sched.h>
7#include <linux/init.h>
8#include <asm/semaphore-helper.h>
9
10#ifndef CONFIG_RMW_INSNS
11spinlock_t semaphore_wake_lock;
12#endif
13
14/*
15 * Semaphores are implemented using a two-way counter:
16 * The "count" variable is decremented for each process
17 * that tries to sleep, while the "waking" variable is
18 * incremented when the "up()" code goes to wake up waiting
19 * processes.
20 *
21 * Notably, the inline "up()" and "down()" functions can
22 * efficiently test if they need to do any extra work (up
23 * needs to do something only if count was negative before
 24 * the increment operation).
25 *
26 * waking_non_zero() (from asm/semaphore.h) must execute
27 * atomically.
28 *
29 * When __up() is called, the count was negative before
30 * incrementing it, and we need to wake up somebody.
31 *
32 * This routine adds one to the count of processes that need to
33 * wake up and exit. ALL waiting processes actually wake up but
34 * only the one that gets to the "waking" field first will gate
35 * through and acquire the semaphore. The others will go back
36 * to sleep.
37 *
38 * Note that these functions are only called when there is
39 * contention on the lock, and as such all this is the
40 * "non-critical" part of the whole semaphore business. The
41 * critical part is the inline stuff in <asm/semaphore.h>
42 * where we want to avoid any extra jumps and calls.
43 */
44void __up(struct semaphore *sem)
45{
46 wake_one_more(sem);
47 wake_up(&sem->wait);
48}
49
50/*
51 * Perform the "down" function. Return zero for semaphore acquired,
52 * return negative for signalled out of the function.
53 *
54 * If called from __down, the return is ignored and the wait loop is
55 * not interruptible. This means that a task waiting on a semaphore
56 * using "down()" cannot be killed until someone does an "up()" on
57 * the semaphore.
58 *
59 * If called from __down_interruptible, the return value gets checked
60 * upon return. If the return value is negative then the task continues
61 * with the negative value in the return register (it can be tested by
62 * the caller).
63 *
64 * Either form may be used in conjunction with "up()".
65 *
66 */
67
68
69#define DOWN_HEAD(task_state) \
70 \
71 \
72 current->state = (task_state); \
73 add_wait_queue(&sem->wait, &wait); \
74 \
75 /* \
76 * Ok, we're set up. sem->count is known to be less than zero \
77 * so we must wait. \
78 * \
 79 * We can let go of the lock for purposes of waiting.		\
80 * We re-acquire it after awaking so as to protect \
81 * all semaphore operations. \
82 * \
83 * If "up()" is called before we call waking_non_zero() then \
84 * we will catch it right away. If it is called later then \
85 * we will have to go through a wakeup cycle to catch it. \
86 * \
87 * Multiple waiters contend for the semaphore lock to see \
88 * who gets to gate through and who has to wait some more. \
89 */ \
90 for (;;) {
91
92#define DOWN_TAIL(task_state) \
93 current->state = (task_state); \
94 } \
95 current->state = TASK_RUNNING; \
96 remove_wait_queue(&sem->wait, &wait);
97
98void __sched __down(struct semaphore * sem)
99{
100 DECLARE_WAITQUEUE(wait, current);
101
102 DOWN_HEAD(TASK_UNINTERRUPTIBLE)
103 if (waking_non_zero(sem))
104 break;
105 schedule();
106 DOWN_TAIL(TASK_UNINTERRUPTIBLE)
107}
108
109int __sched __down_interruptible(struct semaphore * sem)
110{
111 DECLARE_WAITQUEUE(wait, current);
112 int ret = 0;
113
114 DOWN_HEAD(TASK_INTERRUPTIBLE)
115
116 ret = waking_non_zero_interruptible(sem, current);
117 if (ret)
118 {
119 if (ret == 1)
120 /* ret != 0 only if we get interrupted -arca */
121 ret = 0;
122 break;
123 }
124 schedule();
125 DOWN_TAIL(TASK_INTERRUPTIBLE)
126 return ret;
127}
128
129int __down_trylock(struct semaphore * sem)
130{
131 return waking_non_zero_trylock(sem);
132}
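
For the two-way counter described in the comments above, the essential
invariant is that up() turns a negative count into a wakeup credit, and each
sleeper must win exactly one credit to gate through. The C11 sketch below is a
hedged model of that gate; the demo_* names and semaphore_demo_t are invented
for illustration, and the kernel's actual waking_non_zero() variants live in
asm/semaphore-helper.h and differ per architecture.

/*
 * Toy model of the two-way counter: "count" goes negative as tasks
 * queue up; "waking" is credited by up() and each credit lets exactly
 * one sleeper through. Illustrative only.
 */
#include <stdatomic.h>
#include <stdio.h>

typedef struct {
	atomic_int count;	/* > 0: free slots, < 0: waiters */
	atomic_int waking;	/* wakeups granted but not yet consumed */
} semaphore_demo_t;

/* up(): release a slot; credit a wakeup if anyone was waiting */
static void demo_up(semaphore_demo_t *s)
{
	if (atomic_fetch_add(&s->count, 1) < 0)
		atomic_fetch_add(&s->waking, 1);
}

/* the gate: only a sleeper that wins a waking credit proceeds */
static int demo_waking_non_zero(semaphore_demo_t *s)
{
	int w = atomic_load(&s->waking);

	while (w > 0) {
		if (atomic_compare_exchange_weak(&s->waking, &w, w - 1))
			return 1;	/* consumed one credit */
	}
	return 0;			/* go back to sleep */
}

int main(void)
{
	semaphore_demo_t s = { ATOMIC_VAR_INIT(-1), ATOMIC_VAR_INIT(0) };

	demo_up(&s);	/* count -1 -> 0, one credit issued */
	printf("gated through: %d\n", demo_waking_non_zero(&s));	/* 1 */
	printf("second waiter: %d\n", demo_waking_non_zero(&s));	/* 0 */
	return 0;
}

The second waiter finds no credit and would go back to sleep, which is exactly
the "ALL waiting processes wake up but only one gates through" behaviour the
comments describe.
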
diff --git a/arch/m68k/lib/Makefile b/arch/m68k/lib/Makefile
index 6bbf19f96007..a18af095cd7c 100644
--- a/arch/m68k/lib/Makefile
+++ b/arch/m68k/lib/Makefile
@@ -5,4 +5,4 @@
 EXTRA_AFLAGS := -traditional
 
 lib-y := ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
-	checksum.o string.o semaphore.o uaccess.o
+	checksum.o string.o uaccess.o
diff --git a/arch/m68k/lib/semaphore.S b/arch/m68k/lib/semaphore.S
deleted file mode 100644
index 0215624c1602..000000000000
--- a/arch/m68k/lib/semaphore.S
+++ /dev/null
@@ -1,53 +0,0 @@
1/*
2 * linux/arch/m68k/lib/semaphore.S
3 *
4 * Copyright (C) 1996 Linus Torvalds
5 *
6 * m68k version by Andreas Schwab
7 */
8
9#include <linux/linkage.h>
10#include <asm/semaphore.h>
11
12/*
13 * The semaphore operations have a special calling sequence that
 14 * allows us to do a simpler in-line version of them. These routines
15 * need to convert that sequence back into the C sequence when
16 * there is contention on the semaphore.
17 */
18ENTRY(__down_failed)
19 moveml %a0/%d0/%d1,-(%sp)
20 movel %a1,-(%sp)
21 jbsr __down
22 movel (%sp)+,%a1
23 moveml (%sp)+,%a0/%d0/%d1
24 rts
25
26ENTRY(__down_failed_interruptible)
27 movel %a0,-(%sp)
28 movel %d1,-(%sp)
29 movel %a1,-(%sp)
30 jbsr __down_interruptible
31 movel (%sp)+,%a1
32 movel (%sp)+,%d1
33 movel (%sp)+,%a0
34 rts
35
36ENTRY(__down_failed_trylock)
37 movel %a0,-(%sp)
38 movel %d1,-(%sp)
39 movel %a1,-(%sp)
40 jbsr __down_trylock
41 movel (%sp)+,%a1
42 movel (%sp)+,%d1
43 movel (%sp)+,%a0
44 rts
45
46ENTRY(__up_wakeup)
47 moveml %a0/%d0/%d1,-(%sp)
48 movel %a1,-(%sp)
49 jbsr __up
50 movel (%sp)+,%a1
51 moveml (%sp)+,%a0/%d0/%d1
52 rts
53
diff --git a/arch/m68knommu/kernel/Makefile b/arch/m68knommu/kernel/Makefile
index 1524b39ad63f..f0eab3dedb5a 100644
--- a/arch/m68knommu/kernel/Makefile
+++ b/arch/m68knommu/kernel/Makefile
@@ -5,7 +5,7 @@
 extra-y := vmlinux.lds
 
 obj-y += dma.o entry.o init_task.o irq.o m68k_ksyms.o process.o ptrace.o \
-	semaphore.o setup.o signal.o syscalltable.o sys_m68k.o time.o traps.o
+	setup.o signal.o syscalltable.o sys_m68k.o time.o traps.o
 
 obj-$(CONFIG_MODULES) += module.o
 obj-$(CONFIG_COMEMPCI) += comempci.o
diff --git a/arch/m68knommu/kernel/m68k_ksyms.c b/arch/m68knommu/kernel/m68k_ksyms.c
index 53fad1490282..39fe0a7aec32 100644
--- a/arch/m68knommu/kernel/m68k_ksyms.c
+++ b/arch/m68knommu/kernel/m68k_ksyms.c
@@ -13,7 +13,6 @@
 #include <asm/pgalloc.h>
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <asm/semaphore.h>
 #include <asm/checksum.h>
 #include <asm/current.h>
 
@@ -39,11 +38,6 @@ EXPORT_SYMBOL(csum_partial_copy_nocheck);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memset);
 
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_failed_interruptible);
-EXPORT_SYMBOL(__down_failed_trylock);
-EXPORT_SYMBOL(__up_wakeup);
-
 /*
  * libgcc functions - functions that are used internally by the
  * compiler... (prototypes are not correct though, but that
diff --git a/arch/m68knommu/kernel/semaphore.c b/arch/m68knommu/kernel/semaphore.c
deleted file mode 100644
index bce2bc7d87c6..000000000000
--- a/arch/m68knommu/kernel/semaphore.c
+++ /dev/null
@@ -1,133 +0,0 @@
1/*
2 * Generic semaphore code. Buyer beware. Do your own
3 * specific changes in <asm/semaphore-helper.h>
4 */
5
6#include <linux/sched.h>
7#include <linux/err.h>
8#include <linux/init.h>
9#include <asm/semaphore-helper.h>
10
11#ifndef CONFIG_RMW_INSNS
12spinlock_t semaphore_wake_lock;
13#endif
14
15/*
16 * Semaphores are implemented using a two-way counter:
17 * The "count" variable is decremented for each process
18 * that tries to sleep, while the "waking" variable is
19 * incremented when the "up()" code goes to wake up waiting
20 * processes.
21 *
22 * Notably, the inline "up()" and "down()" functions can
23 * efficiently test if they need to do any extra work (up
24 * needs to do something only if count was negative before
 25 * the increment operation).
26 *
27 * waking_non_zero() (from asm/semaphore.h) must execute
28 * atomically.
29 *
30 * When __up() is called, the count was negative before
31 * incrementing it, and we need to wake up somebody.
32 *
33 * This routine adds one to the count of processes that need to
34 * wake up and exit. ALL waiting processes actually wake up but
35 * only the one that gets to the "waking" field first will gate
36 * through and acquire the semaphore. The others will go back
37 * to sleep.
38 *
39 * Note that these functions are only called when there is
40 * contention on the lock, and as such all this is the
41 * "non-critical" part of the whole semaphore business. The
42 * critical part is the inline stuff in <asm/semaphore.h>
43 * where we want to avoid any extra jumps and calls.
44 */
45void __up(struct semaphore *sem)
46{
47 wake_one_more(sem);
48 wake_up(&sem->wait);
49}
50
51/*
52 * Perform the "down" function. Return zero for semaphore acquired,
53 * return negative for signalled out of the function.
54 *
55 * If called from __down, the return is ignored and the wait loop is
56 * not interruptible. This means that a task waiting on a semaphore
57 * using "down()" cannot be killed until someone does an "up()" on
58 * the semaphore.
59 *
60 * If called from __down_interruptible, the return value gets checked
61 * upon return. If the return value is negative then the task continues
62 * with the negative value in the return register (it can be tested by
63 * the caller).
64 *
65 * Either form may be used in conjunction with "up()".
66 *
67 */
68
69
70#define DOWN_HEAD(task_state) \
71 \
72 \
73 current->state = (task_state); \
74 add_wait_queue(&sem->wait, &wait); \
75 \
76 /* \
77 * Ok, we're set up. sem->count is known to be less than zero \
78 * so we must wait. \
79 * \
 80 * We can let go of the lock for purposes of waiting.		\
81 * We re-acquire it after awaking so as to protect \
82 * all semaphore operations. \
83 * \
84 * If "up()" is called before we call waking_non_zero() then \
85 * we will catch it right away. If it is called later then \
86 * we will have to go through a wakeup cycle to catch it. \
87 * \
88 * Multiple waiters contend for the semaphore lock to see \
89 * who gets to gate through and who has to wait some more. \
90 */ \
91 for (;;) {
92
93#define DOWN_TAIL(task_state) \
94 current->state = (task_state); \
95 } \
96 current->state = TASK_RUNNING; \
97 remove_wait_queue(&sem->wait, &wait);
98
99void __sched __down(struct semaphore * sem)
100{
101 DECLARE_WAITQUEUE(wait, current);
102
103 DOWN_HEAD(TASK_UNINTERRUPTIBLE)
104 if (waking_non_zero(sem))
105 break;
106 schedule();
107 DOWN_TAIL(TASK_UNINTERRUPTIBLE)
108}
109
110int __sched __down_interruptible(struct semaphore * sem)
111{
112 DECLARE_WAITQUEUE(wait, current);
113 int ret = 0;
114
115 DOWN_HEAD(TASK_INTERRUPTIBLE)
116
117 ret = waking_non_zero_interruptible(sem, current);
118 if (ret)
119 {
120 if (ret == 1)
121 /* ret != 0 only if we get interrupted -arca */
122 ret = 0;
123 break;
124 }
125 schedule();
126 DOWN_TAIL(TASK_INTERRUPTIBLE)
127 return ret;
128}
129
130int __down_trylock(struct semaphore * sem)
131{
132 return waking_non_zero_trylock(sem);
133}
diff --git a/arch/m68knommu/lib/Makefile b/arch/m68knommu/lib/Makefile
index e051a7913987..d94d709665aa 100644
--- a/arch/m68knommu/lib/Makefile
+++ b/arch/m68knommu/lib/Makefile
@@ -4,4 +4,4 @@
 
 lib-y := ashldi3.o ashrdi3.o lshrdi3.o \
 	muldi3.o mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o \
-	checksum.o semaphore.o memcpy.o memset.o delay.o
+	checksum.o memcpy.o memset.o delay.o
diff --git a/arch/m68knommu/lib/semaphore.S b/arch/m68knommu/lib/semaphore.S
deleted file mode 100644
index 87c746034376..000000000000
--- a/arch/m68knommu/lib/semaphore.S
+++ /dev/null
@@ -1,66 +0,0 @@
1/*
2 * linux/arch/m68k/lib/semaphore.S
3 *
4 * Copyright (C) 1996 Linus Torvalds
5 *
6 * m68k version by Andreas Schwab
7 *
8 * MAR/1999 -- modified to support ColdFire (gerg@snapgear.com)
9 */
10
11#include <linux/linkage.h>
12#include <asm/semaphore.h>
13
14/*
15 * "down_failed" is called with the eventual return address
16 * in %a0, and the address of the semaphore in %a1. We need
17 * to increment the number of waiters on the semaphore,
18 * call "__down()", and then eventually return to try again.
19 */
20ENTRY(__down_failed)
21#ifdef CONFIG_COLDFIRE
22 subl #12,%sp
23 moveml %a0/%d0/%d1,(%sp)
24#else
25 moveml %a0/%d0/%d1,-(%sp)
26#endif
27 movel %a1,-(%sp)
28 jbsr __down
29 movel (%sp)+,%a1
30 movel (%sp)+,%d0
31 movel (%sp)+,%d1
32 rts
33
34ENTRY(__down_failed_interruptible)
35 movel %a0,-(%sp)
36 movel %d1,-(%sp)
37 movel %a1,-(%sp)
38 jbsr __down_interruptible
39 movel (%sp)+,%a1
40 movel (%sp)+,%d1
41 rts
42
43ENTRY(__up_wakeup)
44#ifdef CONFIG_COLDFIRE
45 subl #12,%sp
46 moveml %a0/%d0/%d1,(%sp)
47#else
48 moveml %a0/%d0/%d1,-(%sp)
49#endif
50 movel %a1,-(%sp)
51 jbsr __up
52 movel (%sp)+,%a1
53 movel (%sp)+,%d0
54 movel (%sp)+,%d1
55 rts
56
57ENTRY(__down_failed_trylock)
58 movel %a0,-(%sp)
59 movel %d1,-(%sp)
60 movel %a1,-(%sp)
61 jbsr __down_trylock
62 movel (%sp)+,%a1
63 movel (%sp)+,%d1
64 movel (%sp)+,%a0
65 rts
66
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 9e78e1a4ca17..6fcdb6fda2e2 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -5,7 +5,7 @@
 extra-y := head.o init_task.o vmlinux.lds
 
 obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
-	ptrace.o reset.o semaphore.o setup.o signal.o syscall.o \
+	ptrace.o reset.o setup.o signal.o syscall.o \
 	time.o topology.o traps.o unaligned.o
 
 obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o
diff --git a/arch/mips/kernel/semaphore.c b/arch/mips/kernel/semaphore.c
deleted file mode 100644
index 1265358cdca1..000000000000
--- a/arch/mips/kernel/semaphore.c
+++ /dev/null
@@ -1,168 +0,0 @@
1/*
2 * MIPS-specific semaphore code.
3 *
4 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
5 * Copyright (C) 2004 Ralf Baechle <ralf@linux-mips.org>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
13 * to eliminate the SMP races in the old version between the updates
14 * of `count' and `waking'. Now we use negative `count' values to
15 * indicate that some process(es) are waiting for the semaphore.
16 */
17
18#include <linux/module.h>
19#include <linux/sched.h>
20#include <linux/init.h>
21#include <asm/atomic.h>
22#include <asm/cpu-features.h>
23#include <asm/errno.h>
24#include <asm/semaphore.h>
25#include <asm/war.h>
26/*
27 * Atomically update sem->count.
28 * This does the equivalent of the following:
29 *
30 * old_count = sem->count;
31 * tmp = MAX(old_count, 0) + incr;
32 * sem->count = tmp;
33 * return old_count;
34 *
35 * On machines without lld/scd we need a spinlock to make the manipulation of
36 * sem->count and sem->waking atomic. Scalability isn't an issue because
37 * this lock is used on UP only so it's just an empty variable.
38 */
39static inline int __sem_update_count(struct semaphore *sem, int incr)
40{
41 int old_count, tmp;
42
43 if (cpu_has_llsc && R10000_LLSC_WAR) {
44 __asm__ __volatile__(
45 " .set mips3 \n"
46 "1: ll %0, %2 # __sem_update_count \n"
47 " sra %1, %0, 31 \n"
48 " not %1 \n"
49 " and %1, %0, %1 \n"
50 " addu %1, %1, %3 \n"
51 " sc %1, %2 \n"
52 " beqzl %1, 1b \n"
53 " .set mips0 \n"
54 : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
55 : "r" (incr), "m" (sem->count));
56 } else if (cpu_has_llsc) {
57 __asm__ __volatile__(
58 " .set mips3 \n"
59 "1: ll %0, %2 # __sem_update_count \n"
60 " sra %1, %0, 31 \n"
61 " not %1 \n"
62 " and %1, %0, %1 \n"
63 " addu %1, %1, %3 \n"
64 " sc %1, %2 \n"
65 " beqz %1, 1b \n"
66 " .set mips0 \n"
67 : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
68 : "r" (incr), "m" (sem->count));
69 } else {
70 static DEFINE_SPINLOCK(semaphore_lock);
71 unsigned long flags;
72
73 spin_lock_irqsave(&semaphore_lock, flags);
74 old_count = atomic_read(&sem->count);
75 tmp = max_t(int, old_count, 0) + incr;
76 atomic_set(&sem->count, tmp);
77 spin_unlock_irqrestore(&semaphore_lock, flags);
78 }
79
80 return old_count;
81}
82
83void __up(struct semaphore *sem)
84{
85 /*
86 * Note that we incremented count in up() before we came here,
87 * but that was ineffective since the result was <= 0, and
88 * any negative value of count is equivalent to 0.
89 * This ends up setting count to 1, unless count is now > 0
90 * (i.e. because some other cpu has called up() in the meantime),
91 * in which case we just increment count.
92 */
93 __sem_update_count(sem, 1);
94 wake_up(&sem->wait);
95}
96
97EXPORT_SYMBOL(__up);
98
99/*
100 * Note that when we come in to __down or __down_interruptible,
101 * we have already decremented count, but that decrement was
102 * ineffective since the result was < 0, and any negative value
103 * of count is equivalent to 0.
104 * Thus it is only when we decrement count from some value > 0
105 * that we have actually got the semaphore.
106 */
107void __sched __down(struct semaphore *sem)
108{
109 struct task_struct *tsk = current;
110 DECLARE_WAITQUEUE(wait, tsk);
111
112 __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
113 add_wait_queue_exclusive(&sem->wait, &wait);
114
115 /*
116 * Try to get the semaphore. If the count is > 0, then we've
117 * got the semaphore; we decrement count and exit the loop.
118 * If the count is 0 or negative, we set it to -1, indicating
119 * that we are asleep, and then sleep.
120 */
121 while (__sem_update_count(sem, -1) <= 0) {
122 schedule();
123 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
124 }
125 remove_wait_queue(&sem->wait, &wait);
126 __set_task_state(tsk, TASK_RUNNING);
127
128 /*
129 * If there are any more sleepers, wake one of them up so
130 * that it can either get the semaphore, or set count to -1
131 * indicating that there are still processes sleeping.
132 */
133 wake_up(&sem->wait);
134}
135
136EXPORT_SYMBOL(__down);
137
138int __sched __down_interruptible(struct semaphore * sem)
139{
140 int retval = 0;
141 struct task_struct *tsk = current;
142 DECLARE_WAITQUEUE(wait, tsk);
143
144 __set_task_state(tsk, TASK_INTERRUPTIBLE);
145 add_wait_queue_exclusive(&sem->wait, &wait);
146
147 while (__sem_update_count(sem, -1) <= 0) {
148 if (signal_pending(current)) {
149 /*
150 * A signal is pending - give up trying.
151 * Set sem->count to 0 if it is negative,
152 * since we are no longer sleeping.
153 */
154 __sem_update_count(sem, 0);
155 retval = -EINTR;
156 break;
157 }
158 schedule();
159 set_task_state(tsk, TASK_INTERRUPTIBLE);
160 }
161 remove_wait_queue(&sem->wait, &wait);
162 __set_task_state(tsk, TASK_RUNNING);
163
164 wake_up(&sem->wait);
165 return retval;
166}
167
168EXPORT_SYMBOL(__down_interruptible);
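
The ll/sc loops in __sem_update_count above implement a clamp-then-add: the
old count is clamped at zero before incr is applied, so accumulated negative
values mean only "someone is waiting", never extra debt. A portable C11 sketch
of the same logic using a compare-and-swap loop follows; sem_update_count is
an illustrative name, not the kernel function, and the sketch ignores the
spinlock fallback used on CPUs without ll/sc.

/*
 * C11 model of the clamp-then-add update: tmp = MAX(old, 0) + incr,
 * published atomically; returns the old count. Illustrative only.
 */
#include <stdatomic.h>
#include <stdio.h>

static int sem_update_count(atomic_int *sem_count, int incr)
{
	int old_count = atomic_load(sem_count);
	int tmp;

	do {
		tmp = (old_count > 0 ? old_count : 0) + incr;
		/* on failure old_count is reloaded and we retry, like beqz 1b */
	} while (!atomic_compare_exchange_weak(sem_count, &old_count, tmp));

	return old_count;
}

int main(void)
{
	atomic_int count = ATOMIC_VAR_INIT(-2);	/* two tasks sleeping */

	/* __down path: returns <= 0, so the caller sleeps; count -> -1 */
	printf("old=%d\n", sem_update_count(&count, -1));
	/* __up path: returns -1, count -> 1, so a sleeper is woken */
	printf("old=%d\n", sem_update_count(&count, 1));
	return 0;
}

Starting from count == -2, the down path returns -2 and leaves -1; the up path
then returns -1 and leaves 1, matching the "this ends up setting count to 1"
comment above.
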
diff --git a/arch/mn10300/kernel/Makefile b/arch/mn10300/kernel/Makefile
index ef07c956170a..23f2ab67574c 100644
--- a/arch/mn10300/kernel/Makefile
+++ b/arch/mn10300/kernel/Makefile
@@ -3,7 +3,7 @@
 #
 extra-y := head.o init_task.o vmlinux.lds
 
-obj-y := process.o semaphore.o signal.o entry.o fpu.o traps.o irq.o \
+obj-y := process.o signal.o entry.o fpu.o traps.o irq.o \
 	ptrace.o setup.o time.o sys_mn10300.o io.o kthread.o \
 	switch_to.o mn10300_ksyms.o kernel_execve.o
 
diff --git a/arch/mn10300/kernel/semaphore.c b/arch/mn10300/kernel/semaphore.c
deleted file mode 100644
index 9153c4039fd2..000000000000
--- a/arch/mn10300/kernel/semaphore.c
+++ /dev/null
@@ -1,149 +0,0 @@
1/* MN10300 Semaphore implementation
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11#include <linux/sched.h>
12#include <linux/module.h>
13#include <asm/semaphore.h>
14
15struct sem_waiter {
16 struct list_head list;
17 struct task_struct *task;
18};
19
20#if SEMAPHORE_DEBUG
21void semtrace(struct semaphore *sem, const char *str)
22{
23 if (sem->debug)
24 printk(KERN_DEBUG "[%d] %s({%d,%d})\n",
25 current->pid,
26 str,
27 atomic_read(&sem->count),
28 list_empty(&sem->wait_list) ? 0 : 1);
29}
30#else
31#define semtrace(SEM, STR) do { } while (0)
32#endif
33
34/*
35 * wait for a token to be granted from a semaphore
36 * - entered with lock held and interrupts disabled
37 */
38void __down(struct semaphore *sem, unsigned long flags)
39{
40 struct task_struct *tsk = current;
41 struct sem_waiter waiter;
42
43 semtrace(sem, "Entering __down");
44
45 /* set up my own style of waitqueue */
46 waiter.task = tsk;
47 get_task_struct(tsk);
48
49 list_add_tail(&waiter.list, &sem->wait_list);
50
51 /* we don't need to touch the semaphore struct anymore */
52 spin_unlock_irqrestore(&sem->wait_lock, flags);
53
54 /* wait to be given the semaphore */
55 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
56
57 for (;;) {
58 if (!waiter.task)
59 break;
60 schedule();
61 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
62 }
63
64 tsk->state = TASK_RUNNING;
65 semtrace(sem, "Leaving __down");
66}
67EXPORT_SYMBOL(__down);
68
69/*
70 * interruptibly wait for a token to be granted from a semaphore
71 * - entered with lock held and interrupts disabled
72 */
73int __down_interruptible(struct semaphore *sem, unsigned long flags)
74{
75 struct task_struct *tsk = current;
76 struct sem_waiter waiter;
77 int ret;
78
79 semtrace(sem, "Entering __down_interruptible");
80
81 /* set up my own style of waitqueue */
82 waiter.task = tsk;
83 get_task_struct(tsk);
84
85 list_add_tail(&waiter.list, &sem->wait_list);
86
87 /* we don't need to touch the semaphore struct anymore */
88 set_task_state(tsk, TASK_INTERRUPTIBLE);
89
90 spin_unlock_irqrestore(&sem->wait_lock, flags);
91
92 /* wait to be given the semaphore */
93 ret = 0;
94 for (;;) {
95 if (!waiter.task)
96 break;
97 if (unlikely(signal_pending(current)))
98 goto interrupted;
99 schedule();
100 set_task_state(tsk, TASK_INTERRUPTIBLE);
101 }
102
103 out:
104 tsk->state = TASK_RUNNING;
105 semtrace(sem, "Leaving __down_interruptible");
106 return ret;
107
108 interrupted:
109 spin_lock_irqsave(&sem->wait_lock, flags);
110 list_del(&waiter.list);
111 spin_unlock_irqrestore(&sem->wait_lock, flags);
112
 113	ret = 0;
 114	if (waiter.task) {	/* not granted: drop our task ref and bail */
 115		put_task_struct(current);
 116		ret = -EINTR;
 117	}
118 goto out;
119}
120EXPORT_SYMBOL(__down_interruptible);
121
122/*
123 * release a single token back to a semaphore
124 * - entered with lock held and interrupts disabled
125 */
126void __up(struct semaphore *sem)
127{
128 struct task_struct *tsk;
129 struct sem_waiter *waiter;
130
131 semtrace(sem, "Entering __up");
132
133 /* grant the token to the process at the front of the queue */
134 waiter = list_entry(sem->wait_list.next, struct sem_waiter, list);
135
136 /* We must be careful not to touch 'waiter' after we set ->task = NULL.
 137 * It is allocated on the waiter's stack and may become invalid at
138 * any time after that point (due to a wakeup from another source).
139 */
140 list_del_init(&waiter->list);
141 tsk = waiter->task;
142 smp_mb();
143 waiter->task = NULL;
144 wake_up_process(tsk);
145 put_task_struct(tsk);
146
147 semtrace(sem, "Leaving __up");
148}
149EXPORT_SYMBOL(__up);
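
The mn10300 scheme above is a direct hand-off: up() picks one waiter, clears
its ->task pointer under the wait lock, and only then wakes it, so a wakeup
cannot be lost even though the waiter structure lives on the sleeper's stack.
Below is a userspace pthread sketch of the same hand-off shape; struct waiter,
granted, and the helper names are invented for illustration, and the kernel
uses wake_up_process() plus task refcounts where the sketch uses a condvar.

/*
 * Userspace model of the token hand-off; build with -lpthread.
 * "granted" stands in for waiter->task being cleared by __up().
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct waiter {
	_Atomic int granted;	/* 0: still waiting, 1: token granted */
};

static struct waiter w = { 0 };
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

static void *sleeper(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	/* like the for (;;) loop above: only a grant ends the wait */
	while (!atomic_load(&w.granted))
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	printf("token received\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, sleeper, NULL);

	/* like __up(): publish the grant first, then wake the sleeper */
	pthread_mutex_lock(&lock);
	atomic_store(&w.granted, 1);
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}

As in __up(), the grant is published before the wake and the sleeper rechecks
it in a loop, so a spurious wakeup cannot steal the token.
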
diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile
index 27827bc3717e..1f6585a56f97 100644
--- a/arch/parisc/kernel/Makefile
+++ b/arch/parisc/kernel/Makefile
@@ -9,7 +9,7 @@ AFLAGS_pacache.o := -traditional
 
 obj-y := cache.o pacache.o setup.o traps.o time.o irq.o \
 	pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \
-	ptrace.o hardware.o inventory.o drivers.o semaphore.o \
+	ptrace.o hardware.o inventory.o drivers.o \
 	signal.o hpmc.o real2.o parisc_ksyms.o unaligned.o \
 	process.o processor.o pdc_cons.o pdc_chassis.o unwind.o \
 	topology.o
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index 7aca704e96f0..5b7fc4aa044d 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -69,11 +69,6 @@ EXPORT_SYMBOL(memcpy_toio);
 EXPORT_SYMBOL(memcpy_fromio);
 EXPORT_SYMBOL(memset_io);
 
-#include <asm/semaphore.h>
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down);
-
 extern void $$divI(void);
 extern void $$divU(void);
 extern void $$remI(void);
diff --git a/arch/parisc/kernel/semaphore.c b/arch/parisc/kernel/semaphore.c
deleted file mode 100644
index ee806bcc3726..000000000000
--- a/arch/parisc/kernel/semaphore.c
+++ /dev/null
@@ -1,102 +0,0 @@
1/*
2 * Semaphore implementation Copyright (c) 2001 Matthew Wilcox, Hewlett-Packard
3 */
4
5#include <linux/sched.h>
6#include <linux/spinlock.h>
7#include <linux/errno.h>
8#include <linux/init.h>
9
10/*
11 * Semaphores are complex as we wish to avoid using two variables.
12 * `count' has multiple roles, depending on its value. If it is positive
13 * or zero, there are no waiters. The functions here will never be
14 * called; see <asm/semaphore.h>
15 *
16 * When count is -1 it indicates there is at least one task waiting
17 * for the semaphore.
18 *
19 * When count is less than that, there are '- count - 1' wakeups
 20 * pending, i.e. if it has value -3, there are 2 wakeups pending.
21 *
22 * Note that these functions are only called when there is contention
23 * on the lock, and as such all this is the "non-critical" part of the
24 * whole semaphore business. The critical part is the inline stuff in
25 * <asm/semaphore.h> where we want to avoid any extra jumps and calls.
26 */
27void __up(struct semaphore *sem)
28{
29 sem->count--;
30 wake_up(&sem->wait);
31}
32
33#define wakers(count) (-1 - count)
34
35#define DOWN_HEAD \
36 int ret = 0; \
37 DECLARE_WAITQUEUE(wait, current); \
38 \
39 /* Note that someone is waiting */ \
40 if (sem->count == 0) \
41 sem->count = -1; \
42 \
43 /* protected by the sentry still -- use unlocked version */ \
44 wait.flags = WQ_FLAG_EXCLUSIVE; \
45 __add_wait_queue_tail(&sem->wait, &wait); \
46 lost_race: \
47 spin_unlock_irq(&sem->sentry); \
48
49#define DOWN_TAIL \
50 spin_lock_irq(&sem->sentry); \
51 if (wakers(sem->count) == 0 && ret == 0) \
52 goto lost_race; /* Someone stole our wakeup */ \
53 __remove_wait_queue(&sem->wait, &wait); \
54 current->state = TASK_RUNNING; \
55 if (!waitqueue_active(&sem->wait) && (sem->count < 0)) \
56 sem->count = wakers(sem->count);
57
58#define UPDATE_COUNT \
59 sem->count += (sem->count < 0) ? 1 : - 1;
60
61
62void __sched __down(struct semaphore * sem)
63{
64 DOWN_HEAD
65
66 for(;;) {
67 set_task_state(current, TASK_UNINTERRUPTIBLE);
68 /* we can _read_ this without the sentry */
69 if (sem->count != -1)
70 break;
71 schedule();
72 }
73
74 DOWN_TAIL
75 UPDATE_COUNT
76}
77
78int __sched __down_interruptible(struct semaphore * sem)
79{
80 DOWN_HEAD
81
82 for(;;) {
83 set_task_state(current, TASK_INTERRUPTIBLE);
84 /* we can _read_ this without the sentry */
85 if (sem->count != -1)
86 break;
87
88 if (signal_pending(current)) {
89 ret = -EINTR;
90 break;
91 }
92 schedule();
93 }
94
95 DOWN_TAIL
96
97 if (!ret) {
98 UPDATE_COUNT
99 }
100
101 return ret;
102}
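
The parisc encoding packs waiter and wakeup state into one integer: count == -1
means there are waiters but no wakeups yet, and each further decrement is one
pending wakeup. A tiny standalone C check of the wakers() arithmetic used
above, for illustration only:

#include <stdio.h>

#define wakers(count) (-1 - (count))

int main(void)
{
	/* count == -1: waiters, nothing pending; -3: 2 wakeups pending */
	for (int count = -1; count >= -4; count--)
		printf("count=%d -> %d wakeup(s) pending\n",
		       count, wakers(count));
	return 0;
}
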
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index c1baf9d5903f..b9dbfff9afe9 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -12,7 +12,7 @@ CFLAGS_prom_init.o += -fPIC
 CFLAGS_btext.o += -fPIC
 endif
 
-obj-y := semaphore.o cputable.o ptrace.o syscalls.o \
+obj-y := cputable.o ptrace.o syscalls.o \
 	irq.o align.o signal_32.o pmc.o vdso.o \
 	init_task.o process.o systbl.o idle.o \
 	signal.o
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 9c98424277a8..65d14e6ddc3c 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -15,7 +15,6 @@
 #include <linux/bitops.h>
 
 #include <asm/page.h>
-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/cacheflush.h>
 #include <asm/uaccess.h>
diff --git a/arch/powerpc/kernel/semaphore.c b/arch/powerpc/kernel/semaphore.c
deleted file mode 100644
index 2f8c3c951394..000000000000
--- a/arch/powerpc/kernel/semaphore.c
+++ /dev/null
@@ -1,135 +0,0 @@
1/*
2 * PowerPC-specific semaphore code.
3 *
4 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
12 * to eliminate the SMP races in the old version between the updates
13 * of `count' and `waking'. Now we use negative `count' values to
14 * indicate that some process(es) are waiting for the semaphore.
15 */
16
17#include <linux/sched.h>
18#include <linux/init.h>
19#include <linux/module.h>
20
21#include <asm/atomic.h>
22#include <asm/semaphore.h>
23#include <asm/errno.h>
24
25/*
26 * Atomically update sem->count.
27 * This does the equivalent of the following:
28 *
29 * old_count = sem->count;
30 * tmp = MAX(old_count, 0) + incr;
31 * sem->count = tmp;
32 * return old_count;
33 */
34static inline int __sem_update_count(struct semaphore *sem, int incr)
35{
36 int old_count, tmp;
37
38 __asm__ __volatile__("\n"
39"1: lwarx %0,0,%3\n"
40" srawi %1,%0,31\n"
41" andc %1,%0,%1\n"
42" add %1,%1,%4\n"
43 PPC405_ERR77(0,%3)
44" stwcx. %1,0,%3\n"
45" bne 1b"
46 : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
47 : "r" (&sem->count), "r" (incr), "m" (sem->count)
48 : "cc");
49
50 return old_count;
51}
52
53void __up(struct semaphore *sem)
54{
55 /*
56 * Note that we incremented count in up() before we came here,
57 * but that was ineffective since the result was <= 0, and
58 * any negative value of count is equivalent to 0.
59 * This ends up setting count to 1, unless count is now > 0
60 * (i.e. because some other cpu has called up() in the meantime),
61 * in which case we just increment count.
62 */
63 __sem_update_count(sem, 1);
64 wake_up(&sem->wait);
65}
66EXPORT_SYMBOL(__up);
67
68/*
69 * Note that when we come in to __down or __down_interruptible,
70 * we have already decremented count, but that decrement was
71 * ineffective since the result was < 0, and any negative value
72 * of count is equivalent to 0.
73 * Thus it is only when we decrement count from some value > 0
74 * that we have actually got the semaphore.
75 */
76void __sched __down(struct semaphore *sem)
77{
78 struct task_struct *tsk = current;
79 DECLARE_WAITQUEUE(wait, tsk);
80
81 __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
82 add_wait_queue_exclusive(&sem->wait, &wait);
83
84 /*
85 * Try to get the semaphore. If the count is > 0, then we've
86 * got the semaphore; we decrement count and exit the loop.
87 * If the count is 0 or negative, we set it to -1, indicating
88 * that we are asleep, and then sleep.
89 */
90 while (__sem_update_count(sem, -1) <= 0) {
91 schedule();
92 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
93 }
94 remove_wait_queue(&sem->wait, &wait);
95 __set_task_state(tsk, TASK_RUNNING);
96
97 /*
98 * If there are any more sleepers, wake one of them up so
99 * that it can either get the semaphore, or set count to -1
100 * indicating that there are still processes sleeping.
101 */
102 wake_up(&sem->wait);
103}
104EXPORT_SYMBOL(__down);
105
106int __sched __down_interruptible(struct semaphore * sem)
107{
108 int retval = 0;
109 struct task_struct *tsk = current;
110 DECLARE_WAITQUEUE(wait, tsk);
111
112 __set_task_state(tsk, TASK_INTERRUPTIBLE);
113 add_wait_queue_exclusive(&sem->wait, &wait);
114
115 while (__sem_update_count(sem, -1) <= 0) {
116 if (signal_pending(current)) {
117 /*
118 * A signal is pending - give up trying.
119 * Set sem->count to 0 if it is negative,
120 * since we are no longer sleeping.
121 */
122 __sem_update_count(sem, 0);
123 retval = -EINTR;
124 break;
125 }
126 schedule();
127 set_task_state(tsk, TASK_INTERRUPTIBLE);
128 }
129 remove_wait_queue(&sem->wait, &wait);
130 __set_task_state(tsk, TASK_RUNNING);
131
132 wake_up(&sem->wait);
133 return retval;
134}
135EXPORT_SYMBOL(__down_interruptible);
diff --git a/arch/ppc/kernel/semaphore.c b/arch/ppc/kernel/semaphore.c
deleted file mode 100644
index 2fe429b27c14..000000000000
--- a/arch/ppc/kernel/semaphore.c
+++ /dev/null
@@ -1,131 +0,0 @@
1/*
2 * PowerPC-specific semaphore code.
3 *
4 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
12 * to eliminate the SMP races in the old version between the updates
13 * of `count' and `waking'. Now we use negative `count' values to
14 * indicate that some process(es) are waiting for the semaphore.
15 */
16
17#include <linux/sched.h>
18#include <linux/init.h>
19#include <asm/atomic.h>
20#include <asm/semaphore.h>
21#include <asm/errno.h>
22
23/*
24 * Atomically update sem->count.
25 * This does the equivalent of the following:
26 *
27 * old_count = sem->count;
28 * tmp = MAX(old_count, 0) + incr;
29 * sem->count = tmp;
30 * return old_count;
31 */
32static inline int __sem_update_count(struct semaphore *sem, int incr)
33{
34 int old_count, tmp;
35
36 __asm__ __volatile__("\n"
37"1: lwarx %0,0,%3\n"
38" srawi %1,%0,31\n"
39" andc %1,%0,%1\n"
40" add %1,%1,%4\n"
41 PPC405_ERR77(0,%3)
42" stwcx. %1,0,%3\n"
43" bne 1b"
44 : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
45 : "r" (&sem->count), "r" (incr), "m" (sem->count)
46 : "cc");
47
48 return old_count;
49}
50
51void __up(struct semaphore *sem)
52{
53 /*
54 * Note that we incremented count in up() before we came here,
55 * but that was ineffective since the result was <= 0, and
56 * any negative value of count is equivalent to 0.
57 * This ends up setting count to 1, unless count is now > 0
58 * (i.e. because some other cpu has called up() in the meantime),
59 * in which case we just increment count.
60 */
61 __sem_update_count(sem, 1);
62 wake_up(&sem->wait);
63}
64
65/*
66 * Note that when we come in to __down or __down_interruptible,
67 * we have already decremented count, but that decrement was
68 * ineffective since the result was < 0, and any negative value
69 * of count is equivalent to 0.
70 * Thus it is only when we decrement count from some value > 0
71 * that we have actually got the semaphore.
72 */
73void __sched __down(struct semaphore *sem)
74{
75 struct task_struct *tsk = current;
76 DECLARE_WAITQUEUE(wait, tsk);
77
78 tsk->state = TASK_UNINTERRUPTIBLE;
79 add_wait_queue_exclusive(&sem->wait, &wait);
80 smp_wmb();
81
82 /*
83 * Try to get the semaphore. If the count is > 0, then we've
84 * got the semaphore; we decrement count and exit the loop.
85 * If the count is 0 or negative, we set it to -1, indicating
86 * that we are asleep, and then sleep.
87 */
88 while (__sem_update_count(sem, -1) <= 0) {
89 schedule();
90 tsk->state = TASK_UNINTERRUPTIBLE;
91 }
92 remove_wait_queue(&sem->wait, &wait);
93 tsk->state = TASK_RUNNING;
94
95 /*
96 * If there are any more sleepers, wake one of them up so
97 * that it can either get the semaphore, or set count to -1
98 * indicating that there are still processes sleeping.
99 */
100 wake_up(&sem->wait);
101}
102
103int __sched __down_interruptible(struct semaphore * sem)
104{
105 int retval = 0;
106 struct task_struct *tsk = current;
107 DECLARE_WAITQUEUE(wait, tsk);
108
109 tsk->state = TASK_INTERRUPTIBLE;
110 add_wait_queue_exclusive(&sem->wait, &wait);
111 smp_wmb();
112
113 while (__sem_update_count(sem, -1) <= 0) {
114 if (signal_pending(current)) {
115 /*
116 * A signal is pending - give up trying.
117 * Set sem->count to 0 if it is negative,
118 * since we are no longer sleeping.
119 */
120 __sem_update_count(sem, 0);
121 retval = -EINTR;
122 break;
123 }
124 schedule();
125 tsk->state = TASK_INTERRUPTIBLE;
126 }
127 tsk->state = TASK_RUNNING;
128 remove_wait_queue(&sem->wait, &wait);
129 wake_up(&sem->wait);
130 return retval;
131}
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 4d3e38392cb1..ce144b67f060 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -11,7 +11,7 @@ CFLAGS_smp.o := -Wno-nonnull
 
 obj-y := bitmap.o traps.o time.o process.o base.o early.o \
 	setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
-	semaphore.o s390_ext.o debug.o irq.o ipl.o dis.o diag.o
+	s390_ext.o debug.o irq.o ipl.o dis.o diag.o
 
 obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
 obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
index 7234c737f825..48238a114ce9 100644
--- a/arch/s390/kernel/s390_ksyms.c
+++ b/arch/s390/kernel/s390_ksyms.c
@@ -27,13 +27,6 @@ EXPORT_SYMBOL(_zb_findmap);
 EXPORT_SYMBOL(_sb_findmap);
 
 /*
- * semaphore ops
- */
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-
-/*
  * binfmt_elf loader
  */
 extern int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs);
diff --git a/arch/s390/kernel/semaphore.c b/arch/s390/kernel/semaphore.c
deleted file mode 100644
index 191303f6c1d8..000000000000
--- a/arch/s390/kernel/semaphore.c
+++ /dev/null
@@ -1,108 +0,0 @@
1/*
2 * linux/arch/s390/kernel/semaphore.c
3 *
4 * S390 version
5 * Copyright (C) 1998-2000 IBM Corporation
6 * Author(s): Martin Schwidefsky
7 *
 8 * Derived from "linux/arch/i386/kernel/semaphore.c"
9 * Copyright (C) 1999, Linus Torvalds
10 *
11 */
12#include <linux/sched.h>
13#include <linux/errno.h>
14#include <linux/init.h>
15
16#include <asm/semaphore.h>
17
18/*
19 * Atomically update sem->count. Equivalent to:
20 * old_val = sem->count.counter;
21 * new_val = ((old_val >= 0) ? old_val : 0) + incr;
22 * sem->count.counter = new_val;
23 * return old_val;
24 */
25static inline int __sem_update_count(struct semaphore *sem, int incr)
26{
27 int old_val, new_val;
28
29 asm volatile(
30 " l %0,0(%3)\n"
31 "0: ltr %1,%0\n"
32 " jhe 1f\n"
33 " lhi %1,0\n"
34 "1: ar %1,%4\n"
35 " cs %0,%1,0(%3)\n"
36 " jl 0b\n"
37 : "=&d" (old_val), "=&d" (new_val), "=m" (sem->count)
38 : "a" (&sem->count), "d" (incr), "m" (sem->count)
39 : "cc");
40 return old_val;
41}
42
43/*
44 * The inline function up() incremented count but the result
45 * was <= 0. This indicates that some process is waiting on
46 * the semaphore. The semaphore is free and we'll wake the
47 * first sleeping process, so we set count to 1 unless some
48 * other cpu has called up in the meantime in which case
49 * we just increment count by 1.
50 */
51void __up(struct semaphore *sem)
52{
53 __sem_update_count(sem, 1);
54 wake_up(&sem->wait);
55}
56
57/*
58 * The inline function down() decremented count and the result
59 * was < 0. The wait loop will atomically test and update the
60 * semaphore counter following the rules:
61 * count > 0: decrement count, wake up queue and exit.
62 * count <= 0: set count to -1, go to sleep.
63 */
64void __sched __down(struct semaphore * sem)
65{
66 struct task_struct *tsk = current;
67 DECLARE_WAITQUEUE(wait, tsk);
68
69 __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
70 add_wait_queue_exclusive(&sem->wait, &wait);
71 while (__sem_update_count(sem, -1) <= 0) {
72 schedule();
73 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
74 }
75 remove_wait_queue(&sem->wait, &wait);
76 __set_task_state(tsk, TASK_RUNNING);
77 wake_up(&sem->wait);
78}
79
80/*
81 * Same as __down() with an additional test for signals.
82 * If a signal is pending the count is updated as follows:
83 * count > 0: wake up queue and exit.
84 * count <= 0: set count to 0, wake up queue and exit.
85 */
86int __sched __down_interruptible(struct semaphore * sem)
87{
88 int retval = 0;
89 struct task_struct *tsk = current;
90 DECLARE_WAITQUEUE(wait, tsk);
91
92 __set_task_state(tsk, TASK_INTERRUPTIBLE);
93 add_wait_queue_exclusive(&sem->wait, &wait);
94 while (__sem_update_count(sem, -1) <= 0) {
95 if (signal_pending(current)) {
96 __sem_update_count(sem, 0);
97 retval = -EINTR;
98 break;
99 }
100 schedule();
101 set_task_state(tsk, TASK_INTERRUPTIBLE);
102 }
103 remove_wait_queue(&sem->wait, &wait);
104 __set_task_state(tsk, TASK_RUNNING);
105 wake_up(&sem->wait);
106 return retval;
107}
108
diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32
index 62bf373266f7..4bbdce36b92b 100644
--- a/arch/sh/kernel/Makefile_32
+++ b/arch/sh/kernel/Makefile_32
@@ -5,7 +5,7 @@
 extra-y := head_32.o init_task.o vmlinux.lds
 
 obj-y := debugtraps.o io.o io_generic.o irq.o machvec.o process_32.o \
-	ptrace_32.o semaphore.o setup.o signal_32.o sys_sh.o sys_sh32.o \
+	ptrace_32.o setup.o signal_32.o sys_sh.o sys_sh32.o \
 	syscalls_32.o time_32.o topology.o traps.o traps_32.o
 
 obj-y += cpu/ timers/
diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64
index e01283d49cbf..6edf53b93d94 100644
--- a/arch/sh/kernel/Makefile_64
+++ b/arch/sh/kernel/Makefile_64
@@ -1,7 +1,7 @@
 extra-y := head_64.o init_task.o vmlinux.lds
 
 obj-y := debugtraps.o io.o io_generic.o irq.o machvec.o process_64.o \
-	ptrace_64.o semaphore.o setup.o signal_64.o sys_sh.o sys_sh64.o \
+	ptrace_64.o setup.o signal_64.o sys_sh.o sys_sh64.o \
 	syscalls_64.o time_64.o topology.o traps.o traps_64.o
 
 obj-y += cpu/ timers/
diff --git a/arch/sh/kernel/semaphore.c b/arch/sh/kernel/semaphore.c
deleted file mode 100644
index 184119eeae56..000000000000
--- a/arch/sh/kernel/semaphore.c
+++ /dev/null
@@ -1,139 +0,0 @@
1/*
2 * Just taken from alpha implementation.
 3 * It may not work well, though.
4 */
5/*
6 * Generic semaphore code. Buyer beware. Do your own
7 * specific changes in <asm/semaphore-helper.h>
8 */
9
10#include <linux/errno.h>
11#include <linux/sched.h>
12#include <linux/wait.h>
13#include <linux/init.h>
14#include <asm/semaphore.h>
15#include <asm/semaphore-helper.h>
16
17DEFINE_SPINLOCK(semaphore_wake_lock);
18
19/*
20 * Semaphores are implemented using a two-way counter:
21 * The "count" variable is decremented for each process
22 * that tries to sleep, while the "waking" variable is
23 * incremented when the "up()" code goes to wake up waiting
24 * processes.
25 *
26 * Notably, the inline "up()" and "down()" functions can
27 * efficiently test if they need to do any extra work (up
28 * needs to do something only if count was negative before
 29 * the increment operation).
30 *
31 * waking_non_zero() (from asm/semaphore.h) must execute
32 * atomically.
33 *
34 * When __up() is called, the count was negative before
35 * incrementing it, and we need to wake up somebody.
36 *
37 * This routine adds one to the count of processes that need to
38 * wake up and exit. ALL waiting processes actually wake up but
39 * only the one that gets to the "waking" field first will gate
40 * through and acquire the semaphore. The others will go back
41 * to sleep.
42 *
43 * Note that these functions are only called when there is
44 * contention on the lock, and as such all this is the
45 * "non-critical" part of the whole semaphore business. The
46 * critical part is the inline stuff in <asm/semaphore.h>
47 * where we want to avoid any extra jumps and calls.
48 */
49void __up(struct semaphore *sem)
50{
51 wake_one_more(sem);
52 wake_up(&sem->wait);
53}
54
55/*
56 * Perform the "down" function. Return zero for semaphore acquired,
57 * return negative for signalled out of the function.
58 *
59 * If called from __down, the return is ignored and the wait loop is
60 * not interruptible. This means that a task waiting on a semaphore
61 * using "down()" cannot be killed until someone does an "up()" on
62 * the semaphore.
63 *
64 * If called from __down_interruptible, the return value gets checked
65 * upon return. If the return value is negative then the task continues
66 * with the negative value in the return register (it can be tested by
67 * the caller).
68 *
69 * Either form may be used in conjunction with "up()".
70 *
71 */
72
73#define DOWN_VAR \
74 struct task_struct *tsk = current; \
75 wait_queue_t wait; \
76 init_waitqueue_entry(&wait, tsk);
77
78#define DOWN_HEAD(task_state) \
79 \
80 \
81 tsk->state = (task_state); \
82 add_wait_queue(&sem->wait, &wait); \
83 \
84 /* \
85 * Ok, we're set up. sem->count is known to be less than zero \
86 * so we must wait. \
87 * \
 88 * We can let go of the lock for purposes of waiting.		\
89 * We re-acquire it after awaking so as to protect \
90 * all semaphore operations. \
91 * \
92 * If "up()" is called before we call waking_non_zero() then \
93 * we will catch it right away. If it is called later then \
94 * we will have to go through a wakeup cycle to catch it. \
95 * \
96 * Multiple waiters contend for the semaphore lock to see \
97 * who gets to gate through and who has to wait some more. \
98 */ \
99 for (;;) {
100
101#define DOWN_TAIL(task_state) \
102 tsk->state = (task_state); \
103 } \
104 tsk->state = TASK_RUNNING; \
105 remove_wait_queue(&sem->wait, &wait);
106
107void __sched __down(struct semaphore * sem)
108{
109 DOWN_VAR
110 DOWN_HEAD(TASK_UNINTERRUPTIBLE)
111 if (waking_non_zero(sem))
112 break;
113 schedule();
114 DOWN_TAIL(TASK_UNINTERRUPTIBLE)
115}
116
117int __sched __down_interruptible(struct semaphore * sem)
118{
119 int ret = 0;
120 DOWN_VAR
121 DOWN_HEAD(TASK_INTERRUPTIBLE)
122
123 ret = waking_non_zero_interruptible(sem, tsk);
124 if (ret)
125 {
126 if (ret == 1)
127 /* ret != 0 only if we get interrupted -arca */
128 ret = 0;
129 break;
130 }
131 schedule();
132 DOWN_TAIL(TASK_INTERRUPTIBLE)
133 return ret;
134}
135
136int __down_trylock(struct semaphore * sem)
137{
138 return waking_non_zero_trylock(sem);
139}
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
index 45bb333fd9ec..6d405462cee8 100644
--- a/arch/sh/kernel/sh_ksyms_32.c
+++ b/arch/sh/kernel/sh_ksyms_32.c
@@ -9,7 +9,6 @@
 #include <linux/pci.h>
 #include <linux/irq.h>
 #include <asm/sections.h>
-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/checksum.h>
@@ -48,12 +47,6 @@ EXPORT_SYMBOL(__copy_user);
 EXPORT_SYMBOL(get_vm_area);
 #endif
 
-/* semaphore exports */
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down_trylock);
-
 EXPORT_SYMBOL(__udelay);
 EXPORT_SYMBOL(__ndelay);
 EXPORT_SYMBOL(__const_udelay);
diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c
index b6410ce4bd1d..a310c9707f03 100644
--- a/arch/sh/kernel/sh_ksyms_64.c
+++ b/arch/sh/kernel/sh_ksyms_64.c
@@ -16,7 +16,6 @@
 #include <linux/in6.h>
 #include <linux/interrupt.h>
 #include <linux/screen_info.h>
-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/checksum.h>
@@ -37,9 +36,6 @@ EXPORT_SYMBOL(csum_partial_copy_nocheck);
 EXPORT_SYMBOL(screen_info);
 #endif
 
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__up);
 EXPORT_SYMBOL(__put_user_asm_l);
 EXPORT_SYMBOL(__get_user_asm_l);
 EXPORT_SYMBOL(copy_page);
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index bf1b15d3f6f5..2712bb166f6f 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -12,7 +12,7 @@ obj-y := entry.o wof.o wuf.o etrap.o rtrap.o traps.o $(IRQ_OBJS) \
 	sys_sparc.o sunos_asm.o systbls.o \
 	time.o windows.o cpu.o devices.o sclow.o \
 	tadpole.o tick14.o ptrace.o sys_solaris.o \
-	unaligned.o una_asm.o muldiv.o semaphore.o \
+	unaligned.o una_asm.o muldiv.o \
 	prom.o of_device.o devres.o
 
 devres-y = ../../../kernel/irq/devres.o
diff --git a/arch/sparc/kernel/semaphore.c b/arch/sparc/kernel/semaphore.c
deleted file mode 100644
index 0c37c1a7cd7e..000000000000
--- a/arch/sparc/kernel/semaphore.c
+++ /dev/null
@@ -1,155 +0,0 @@
1/* $Id: semaphore.c,v 1.7 2001/04/18 21:06:05 davem Exp $ */
2
3/* sparc32 semaphore implementation, based on i386 version */
4
5#include <linux/sched.h>
6#include <linux/errno.h>
7#include <linux/init.h>
8
9#include <asm/semaphore.h>
10
11/*
12 * Semaphores are implemented using a two-way counter:
13 * The "count" variable is decremented for each process
14 * that tries to acquire the semaphore, while the "sleeping"
15 * variable is a count of such acquires.
16 *
17 * Notably, the inline "up()" and "down()" functions can
18 * efficiently test if they need to do any extra work (up
19 * needs to do something only if count was negative before
 20 * the increment operation).
21 *
22 * "sleeping" and the contention routine ordering is
23 * protected by the semaphore spinlock.
24 *
25 * Note that these functions are only called when there is
26 * contention on the lock, and as such all this is the
27 * "non-critical" part of the whole semaphore business. The
28 * critical part is the inline stuff in <asm/semaphore.h>
29 * where we want to avoid any extra jumps and calls.
30 */
31
32/*
33 * Logic:
34 * - only on a boundary condition do we need to care. When we go
35 * from a negative count to a non-negative, we wake people up.
 36 * - when we go from a non-negative count to a negative, we must
37 * (a) synchronize with the "sleeper" count and (b) make sure
38 * that we're on the wakeup list before we synchronize so that
39 * we cannot lose wakeup events.
40 */
41
42void __up(struct semaphore *sem)
43{
44 wake_up(&sem->wait);
45}
46
47static DEFINE_SPINLOCK(semaphore_lock);
48
49void __sched __down(struct semaphore * sem)
50{
51 struct task_struct *tsk = current;
52 DECLARE_WAITQUEUE(wait, tsk);
53 tsk->state = TASK_UNINTERRUPTIBLE;
54 add_wait_queue_exclusive(&sem->wait, &wait);
55
56 spin_lock_irq(&semaphore_lock);
57 sem->sleepers++;
58 for (;;) {
59 int sleepers = sem->sleepers;
60
61 /*
62 * Add "everybody else" into it. They aren't
63 * playing, because we own the spinlock.
64 */
65 if (!atomic24_add_negative(sleepers - 1, &sem->count)) {
66 sem->sleepers = 0;
67 break;
68 }
69 sem->sleepers = 1; /* us - see -1 above */
70 spin_unlock_irq(&semaphore_lock);
71
72 schedule();
73 tsk->state = TASK_UNINTERRUPTIBLE;
74 spin_lock_irq(&semaphore_lock);
75 }
76 spin_unlock_irq(&semaphore_lock);
77 remove_wait_queue(&sem->wait, &wait);
78 tsk->state = TASK_RUNNING;
79 wake_up(&sem->wait);
80}
81
82int __sched __down_interruptible(struct semaphore * sem)
83{
84 int retval = 0;
85 struct task_struct *tsk = current;
86 DECLARE_WAITQUEUE(wait, tsk);
87 tsk->state = TASK_INTERRUPTIBLE;
88 add_wait_queue_exclusive(&sem->wait, &wait);
89
90 spin_lock_irq(&semaphore_lock);
91 sem->sleepers ++;
92 for (;;) {
93 int sleepers = sem->sleepers;
94
95 /*
96 * With signals pending, this turns into
97 * the trylock failure case - we won't be
 98 * sleeping, and we can't get the lock as
99 * it has contention. Just correct the count
100 * and exit.
101 */
102 if (signal_pending(current)) {
103 retval = -EINTR;
104 sem->sleepers = 0;
105 atomic24_add(sleepers, &sem->count);
106 break;
107 }
108
109 /*
110 * Add "everybody else" into it. They aren't
111 * playing, because we own the spinlock. The
112 * "-1" is because we're still hoping to get
113 * the lock.
114 */
115 if (!atomic24_add_negative(sleepers - 1, &sem->count)) {
116 sem->sleepers = 0;
117 break;
118 }
119 sem->sleepers = 1; /* us - see -1 above */
120 spin_unlock_irq(&semaphore_lock);
121
122 schedule();
123 tsk->state = TASK_INTERRUPTIBLE;
124 spin_lock_irq(&semaphore_lock);
125 }
126 spin_unlock_irq(&semaphore_lock);
127 tsk->state = TASK_RUNNING;
128 remove_wait_queue(&sem->wait, &wait);
129 wake_up(&sem->wait);
130 return retval;
131}
132
133/*
134 * Trylock failed - make sure we correct for
135 * having decremented the count.
136 */
137int __down_trylock(struct semaphore * sem)
138{
139 int sleepers;
140 unsigned long flags;
141
142 spin_lock_irqsave(&semaphore_lock, flags);
143 sleepers = sem->sleepers + 1;
144 sem->sleepers = 0;
145
146 /*
147 * Add "everybody else" and us into it. They aren't
148 * playing, because we own the spinlock.
149 */
150 if (!atomic24_add_negative(sleepers, &sem->count))
151 wake_up(&sem->wait);
152
153 spin_unlock_irqrestore(&semaphore_lock, flags);
154 return 1;
155}
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
index c1025e551650..97b1de0e9094 100644
--- a/arch/sparc/kernel/sparc_ksyms.c
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -107,11 +107,6 @@ EXPORT_SYMBOL(___rw_read_try);
 EXPORT_SYMBOL(___rw_read_exit);
 EXPORT_SYMBOL(___rw_write_enter);
 #endif
-/* semaphores */
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__down_interruptible);
 
 EXPORT_SYMBOL(sparc_valid_addr_bitmap);
 EXPORT_SYMBOL(phys_base);
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
index 1bf5b187de49..459462e80a12 100644
--- a/arch/sparc64/kernel/Makefile
+++ b/arch/sparc64/kernel/Makefile
@@ -10,7 +10,7 @@ extra-y := head.o init_task.o vmlinux.lds
 obj-y := process.o setup.o cpu.o idprom.o \
 	 traps.o auxio.o una_asm.o sysfs.o iommu.o \
 	 irq.o ptrace.o time.o sys_sparc.o signal.o \
-	 unaligned.o central.o pci.o starfire.o semaphore.o \
+	 unaligned.o central.o pci.o starfire.o \
 	 power.o sbus.o sparc64_ksyms.o chmc.o \
 	 visemul.o prom.o of_device.o hvapi.o sstate.o mdesc.o
 
diff --git a/arch/sparc64/kernel/semaphore.c b/arch/sparc64/kernel/semaphore.c
deleted file mode 100644
index 9974a6899551..000000000000
--- a/arch/sparc64/kernel/semaphore.c
+++ /dev/null
@@ -1,254 +0,0 @@
1/* semaphore.c: Sparc64 semaphore implementation.
2 *
3 * This is basically the PPC semaphore scheme ported to use
4 * the sparc64 atomic instructions, so see the PPC code for
5 * credits.
6 */
7
8#include <linux/sched.h>
9#include <linux/errno.h>
10#include <linux/init.h>
11
12/*
13 * Atomically update sem->count.
14 * This does the equivalent of the following:
15 *
16 * old_count = sem->count;
17 * tmp = MAX(old_count, 0) + incr;
18 * sem->count = tmp;
19 * return old_count;
20 */
21static inline int __sem_update_count(struct semaphore *sem, int incr)
22{
23 int old_count, tmp;
24
25 __asm__ __volatile__("\n"
26" ! __sem_update_count old_count(%0) tmp(%1) incr(%4) &sem->count(%3)\n"
27"1: ldsw [%3], %0\n"
28" mov %0, %1\n"
29" cmp %0, 0\n"
30" movl %%icc, 0, %1\n"
31" add %1, %4, %1\n"
32" cas [%3], %0, %1\n"
33" cmp %0, %1\n"
34" membar #StoreLoad | #StoreStore\n"
35" bne,pn %%icc, 1b\n"
36" nop\n"
37 : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
38 : "r" (&sem->count), "r" (incr), "m" (sem->count)
39 : "cc");
40
41 return old_count;
42}
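
/*
 * Annotation, not part of the patch: the same update expressed with
 * the portable atomic_cmpxchg() helper instead of the raw ldsw/cas
 * loop.  Hypothetical function name; assumes sem->count is an atomic_t.
 */
static inline int sem_update_count_sketch(struct semaphore *sem, int incr)
{
	int old, new;

	do {
		old = atomic_read(&sem->count);
		new = (old > 0 ? old : 0) + incr;  /* MAX(old_count, 0) + incr */
	} while (atomic_cmpxchg(&sem->count, old, new) != old);

	return old;
}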
43
44static void __up(struct semaphore *sem)
45{
46 __sem_update_count(sem, 1);
47 wake_up(&sem->wait);
48}
49
50void up(struct semaphore *sem)
51{
52 /* This atomically does:
53 * old_val = sem->count;
54 * new_val = sem->count + 1;
55 * sem->count = new_val;
56 * if (old_val < 0)
57 * __up(sem);
58 *
59 * The (old_val < 0) test is equivalent to
60 * the more straightforward (new_val <= 0),
61 * but it is easier to test the former because
62 * of how the CAS instruction works.
63 */
64
65 __asm__ __volatile__("\n"
66" ! up sem(%0)\n"
67" membar #StoreLoad | #LoadLoad\n"
68"1: lduw [%0], %%g1\n"
69" add %%g1, 1, %%g7\n"
70" cas [%0], %%g1, %%g7\n"
71" cmp %%g1, %%g7\n"
72" bne,pn %%icc, 1b\n"
73" addcc %%g7, 1, %%g0\n"
74" membar #StoreLoad | #StoreStore\n"
75" ble,pn %%icc, 3f\n"
76" nop\n"
77"2:\n"
78" .subsection 2\n"
79"3: mov %0, %%g1\n"
80" save %%sp, -160, %%sp\n"
81" call %1\n"
82" mov %%g1, %%o0\n"
83" ba,pt %%xcc, 2b\n"
84" restore\n"
85" .previous\n"
86 : : "r" (sem), "i" (__up)
87 : "g1", "g2", "g3", "g7", "memory", "cc");
88}
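
/*
 * Annotation, not part of the patch: a rough C equivalent of the asm
 * fast path above (hypothetical helper name; assumes an atomic_t count).
 */
static void up_sketch(struct semaphore *sem)
{
	/* atomic_inc_return() yields new_val == old_val + 1, so
	 * new_val <= 0 is exactly the old_val < 0 case described in
	 * the comment above. */
	if (atomic_inc_return(&sem->count) <= 0)
		__up(sem);	/* old count was negative: waiters exist */
}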
89
90static void __sched __down(struct semaphore * sem)
91{
92 struct task_struct *tsk = current;
93 DECLARE_WAITQUEUE(wait, tsk);
94
95 tsk->state = TASK_UNINTERRUPTIBLE;
96 add_wait_queue_exclusive(&sem->wait, &wait);
97
98 while (__sem_update_count(sem, -1) <= 0) {
99 schedule();
100 tsk->state = TASK_UNINTERRUPTIBLE;
101 }
102 remove_wait_queue(&sem->wait, &wait);
103 tsk->state = TASK_RUNNING;
104
105 wake_up(&sem->wait);
106}
107
108void __sched down(struct semaphore *sem)
109{
110 might_sleep();
111 /* This atomically does:
112 * old_val = sem->count;
113 * new_val = sem->count - 1;
114 * sem->count = new_val;
115 * if (old_val < 1)
116 * __down(sem);
117 *
118 * The (old_val < 1) test is equivalent to
119 * the more straightforward (new_val < 0),
120 * but it is easier to test the former because
121 * of how the CAS instruction works.
122 */
123
124 __asm__ __volatile__("\n"
125" ! down sem(%0)\n"
126"1: lduw [%0], %%g1\n"
127" sub %%g1, 1, %%g7\n"
128" cas [%0], %%g1, %%g7\n"
129" cmp %%g1, %%g7\n"
130" bne,pn %%icc, 1b\n"
131" cmp %%g7, 1\n"
132" membar #StoreLoad | #StoreStore\n"
133" bl,pn %%icc, 3f\n"
134" nop\n"
135"2:\n"
136" .subsection 2\n"
137"3: mov %0, %%g1\n"
138" save %%sp, -160, %%sp\n"
139" call %1\n"
140" mov %%g1, %%o0\n"
141" ba,pt %%xcc, 2b\n"
142" restore\n"
143" .previous\n"
144 : : "r" (sem), "i" (__down)
145 : "g1", "g2", "g3", "g7", "memory", "cc");
146}
147
148int down_trylock(struct semaphore *sem)
149{
150 int ret;
151
152 /* This atomically does:
153 * old_val = sem->count;
154 * new_val = sem->count - 1;
155 * if (old_val < 1) {
156 * ret = 1;
157 * } else {
158 * sem->count = new_val;
159 * ret = 0;
160 * }
161 *
162 * The (old_val < 1) test is equivalent to
163 * the more straightforward (new_val < 0),
164 * but it is easier to test the former because
165 * of how the CAS instruction works.
166 */
167
168 __asm__ __volatile__("\n"
169" ! down_trylock sem(%1) ret(%0)\n"
170"1: lduw [%1], %%g1\n"
171" sub %%g1, 1, %%g7\n"
172" cmp %%g1, 1\n"
173" bl,pn %%icc, 2f\n"
174" mov 1, %0\n"
175" cas [%1], %%g1, %%g7\n"
176" cmp %%g1, %%g7\n"
177" bne,pn %%icc, 1b\n"
178" mov 0, %0\n"
179" membar #StoreLoad | #StoreStore\n"
180"2:\n"
181 : "=&r" (ret)
182 : "r" (sem)
183 : "g1", "g7", "memory", "cc");
184
185 return ret;
186}
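
/*
 * Annotation, not part of the patch: the trylock loop above in
 * portable C (hypothetical helper name; assumes an atomic_t count).
 * Returns 0 when the count was claimed, 1 on contention -- note the
 * convention is inverted relative to down_interruptible().
 */
static int down_trylock_sketch(struct semaphore *sem)
{
	int old;

	do {
		old = atomic_read(&sem->count);
		if (old < 1)
			return 1;	/* would go negative: leave count alone */
	} while (atomic_cmpxchg(&sem->count, old, old - 1) != old);

	return 0;			/* got it */
}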
187
188static int __sched __down_interruptible(struct semaphore * sem)
189{
190 int retval = 0;
191 struct task_struct *tsk = current;
192 DECLARE_WAITQUEUE(wait, tsk);
193
194 tsk->state = TASK_INTERRUPTIBLE;
195 add_wait_queue_exclusive(&sem->wait, &wait);
196
197 while (__sem_update_count(sem, -1) <= 0) {
198 if (signal_pending(current)) {
199 __sem_update_count(sem, 0);
200 retval = -EINTR;
201 break;
202 }
203 schedule();
204 tsk->state = TASK_INTERRUPTIBLE;
205 }
206 tsk->state = TASK_RUNNING;
207 remove_wait_queue(&sem->wait, &wait);
208 wake_up(&sem->wait);
209 return retval;
210}
211
212int __sched down_interruptible(struct semaphore *sem)
213{
214 int ret = 0;
215
216 might_sleep();
217 /* This atomically does:
218 * old_val = sem->count;
219 * new_val = sem->count - 1;
220 * sem->count = new_val;
221 * if (old_val < 1)
222 * ret = __down_interruptible(sem);
223 *
224 * The (old_val < 1) test is equivalent to
225 * the more straightforward (new_val < 0),
226 * but it is easier to test the former because
227 * of how the CAS instruction works.
228 */
229
230 __asm__ __volatile__("\n"
231" ! down_interruptible sem(%2) ret(%0)\n"
232"1: lduw [%2], %%g1\n"
233" sub %%g1, 1, %%g7\n"
234" cas [%2], %%g1, %%g7\n"
235" cmp %%g1, %%g7\n"
236" bne,pn %%icc, 1b\n"
237" cmp %%g7, 1\n"
238" membar #StoreLoad | #StoreStore\n"
239" bl,pn %%icc, 3f\n"
240" nop\n"
241"2:\n"
242" .subsection 2\n"
243"3: mov %2, %%g1\n"
244" save %%sp, -160, %%sp\n"
245" call %3\n"
246" mov %%g1, %%o0\n"
247" ba,pt %%xcc, 2b\n"
248" restore\n"
249" .previous\n"
250 : "=r" (ret)
251 : "0" (ret), "r" (sem), "i" (__down_interruptible)
252 : "g1", "g2", "g3", "g7", "memory", "cc");
253 return ret;
254}
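
For readers who do not speak sparc64 assembler: under the same assumptions as
the sketches above (an atomic_t count, hypothetical helper name), the
down()/down_interruptible() fast path reduces to a single atomic decrement:

static void down_sketch(struct semaphore *sem)
{
	might_sleep();
	/* atomic_dec_return() yields new_val == old_val - 1, so
	 * new_val < 0 is the old_val < 1 case from the comment;
	 * down_interruptible() differs only in calling
	 * __down_interruptible() and propagating its return value. */
	if (atomic_dec_return(&sem->count) < 0)
		__down(sem);	/* contended: go to sleep */
}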
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 51fa773f38c9..051b8d9cb989 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -130,12 +130,6 @@ EXPORT_SYMBOL(_mcount);
 
 EXPORT_SYMBOL(sparc64_get_clock_tick);
 
-/* semaphores */
-EXPORT_SYMBOL(down);
-EXPORT_SYMBOL(down_trylock);
-EXPORT_SYMBOL(down_interruptible);
-EXPORT_SYMBOL(up);
-
 /* RW semaphores */
 EXPORT_SYMBOL(__down_read);
 EXPORT_SYMBOL(__down_read_trylock);
diff --git a/arch/um/Kconfig.i386 b/arch/um/Kconfig.i386
index 3cd8a04d66d8..e09edfa560da 100644
--- a/arch/um/Kconfig.i386
+++ b/arch/um/Kconfig.i386
@@ -19,10 +19,6 @@ config 64BIT
 	bool
 	default n
 
-config SEMAPHORE_SLEEPERS
-	bool
-	default y
-
 config 3_LEVEL_PGTABLES
 	bool "Three-level pagetables (EXPERIMENTAL)"
 	default n
diff --git a/arch/um/Kconfig.x86_64 b/arch/um/Kconfig.x86_64
index 6533b349f061..3fbe69e359ed 100644
--- a/arch/um/Kconfig.x86_64
+++ b/arch/um/Kconfig.x86_64
@@ -11,10 +11,6 @@ config RWSEM_GENERIC_SPINLOCK
 	bool
 	default y
 
-config SEMAPHORE_SLEEPERS
-	bool
-	default y
-
 config 3_LEVEL_PGTABLES
 	bool
 	default y
diff --git a/arch/um/sys-i386/ksyms.c b/arch/um/sys-i386/ksyms.c
index 2a1eac1859ce..bfbefd30db8f 100644
--- a/arch/um/sys-i386/ksyms.c
+++ b/arch/um/sys-i386/ksyms.c
@@ -1,17 +1,5 @@
1#include "linux/module.h" 1#include "linux/module.h"
2#include "linux/in6.h"
3#include "linux/rwsem.h"
4#include "asm/byteorder.h"
5#include "asm/delay.h"
6#include "asm/semaphore.h"
7#include "asm/uaccess.h"
8#include "asm/checksum.h" 2#include "asm/checksum.h"
9#include "asm/errno.h"
10
11EXPORT_SYMBOL(__down_failed);
12EXPORT_SYMBOL(__down_failed_interruptible);
13EXPORT_SYMBOL(__down_failed_trylock);
14EXPORT_SYMBOL(__up_wakeup);
15 3
16/* Networking helper routines. */ 4/* Networking helper routines. */
17EXPORT_SYMBOL(csum_partial); 5EXPORT_SYMBOL(csum_partial);
diff --git a/arch/um/sys-ppc/Makefile b/arch/um/sys-ppc/Makefile
index 08901526e893..b8bc844fd2c4 100644
--- a/arch/um/sys-ppc/Makefile
+++ b/arch/um/sys-ppc/Makefile
@@ -3,7 +3,7 @@ OBJ = built-in.o
 .S.o:
 	$(CC) $(KBUILD_AFLAGS) -D__ASSEMBLY__ -D__UM_PPC__ -c $< -o $*.o
 
-OBJS = ptrace.o sigcontext.o semaphore.o checksum.o miscthings.o misc.o \
+OBJS = ptrace.o sigcontext.o checksum.o miscthings.o misc.o \
 	ptrace_user.o sysrq.o
 
 EXTRA_AFLAGS := -DCONFIG_PPC32 -I. -I$(srctree)/arch/ppc/kernel
@@ -20,10 +20,6 @@ ptrace_user.o: ptrace_user.c
 sigcontext.o: sigcontext.c
 	$(CC) $(USER_CFLAGS) $(EXTRA_CFLAGS) -c -o $@ $<
 
-semaphore.c:
-	rm -f $@
-	ln -s $(srctree)/arch/ppc/kernel/$@ $@
-
 checksum.S:
 	rm -f $@
 	ln -s $(srctree)/arch/ppc/lib/$@ $@
@@ -66,4 +62,4 @@ misc.o: misc.S ppc_defs.h
 	$(CC) $(EXTRA_AFLAGS) $(KBUILD_AFLAGS) -D__ASSEMBLY__ -D__UM_PPC__ -c $< -o $*.o
 	rm -f asm
 
-clean-files := $(OBJS) ppc_defs.h checksum.S semaphore.c mk_defs.c
+clean-files := $(OBJS) ppc_defs.h checksum.S mk_defs.c
diff --git a/arch/um/sys-x86_64/ksyms.c b/arch/um/sys-x86_64/ksyms.c
index 12c593607c59..4d7d1a812d8f 100644
--- a/arch/um/sys-x86_64/ksyms.c
+++ b/arch/um/sys-x86_64/ksyms.c
@@ -1,16 +1,5 @@
1#include "linux/module.h" 1#include "linux/module.h"
2#include "linux/in6.h" 2#include "asm/string.h"
3#include "linux/rwsem.h"
4#include "asm/byteorder.h"
5#include "asm/semaphore.h"
6#include "asm/uaccess.h"
7#include "asm/checksum.h"
8#include "asm/errno.h"
9
10EXPORT_SYMBOL(__down_failed);
11EXPORT_SYMBOL(__down_failed_interruptible);
12EXPORT_SYMBOL(__down_failed_trylock);
13EXPORT_SYMBOL(__up_wakeup);
14 3
15/*XXX: we need them because they would be exported by x86_64 */ 4/*XXX: we need them because they would be exported by x86_64 */
16EXPORT_SYMBOL(__memcpy); 5EXPORT_SYMBOL(__memcpy);
diff --git a/arch/v850/kernel/Makefile b/arch/v850/kernel/Makefile
index 3930482bddc4..da5889c53576 100644
--- a/arch/v850/kernel/Makefile
+++ b/arch/v850/kernel/Makefile
@@ -11,7 +11,7 @@
 
 extra-y := head.o init_task.o vmlinux.lds
 
-obj-y += intv.o entry.o process.o syscalls.o time.o semaphore.o setup.o \
+obj-y += intv.o entry.o process.o syscalls.o time.o setup.o \
 	  signal.o irq.o mach.o ptrace.o bug.o
 obj-$(CONFIG_MODULES) += module.o v850_ksyms.o
 # chip-specific code
diff --git a/arch/v850/kernel/semaphore.c b/arch/v850/kernel/semaphore.c
deleted file mode 100644
index fc89fd661c99..000000000000
--- a/arch/v850/kernel/semaphore.c
+++ /dev/null
@@ -1,166 +0,0 @@
1/*
2 * arch/v850/kernel/semaphore.c -- Semaphore support
3 *
4 * Copyright (C) 1998-2000 IBM Corporation
5 * Copyright (C) 1999 Linus Torvalds
6 *
7 * This file is subject to the terms and conditions of the GNU General
8 * Public License. See the file COPYING in the main directory of this
9 * archive for more details.
10 *
11 * This file is a copy of the s390 version, arch/s390/kernel/semaphore.c
12 * Author(s): Martin Schwidefsky
13 * which was derived from the i386 version, linux/arch/i386/kernel/semaphore.c
14 */
15
16#include <linux/errno.h>
17#include <linux/sched.h>
18#include <linux/init.h>
19
20#include <asm/semaphore.h>
21
22/*
23 * Semaphores are implemented using a two-way counter:
24 * The "count" variable is decremented for each process
25 * that tries to acquire the semaphore, while the "sleeping"
26 * variable is a count of such acquires.
27 *
28 * Notably, the inline "up()" and "down()" functions can
29 * efficiently test if they need to do any extra work (up
30 * needs to do something only if count was negative before
31 * the increment operation).
32 *
33 * "sleeping" and the contention routine ordering is
34 * protected by the semaphore spinlock.
35 *
36 * Note that these functions are only called when there is
37 * contention on the lock, and as such all this is the
38 * "non-critical" part of the whole semaphore business. The
39 * critical part is the inline stuff in <asm/semaphore.h>
40 * where we want to avoid any extra jumps and calls.
41 */
42
43/*
44 * Logic:
45 * - only on a boundary condition do we need to care. When we go
46 * from a negative count to a non-negative, we wake people up.
47 * - when we go from a non-negative count to a negative, we must
48 * (a) synchronize with the "sleeper" count and (b) make sure
49 * that we're on the wakeup list before we synchronize so that
50 * we cannot lose wakeup events.
51 */
52
53void __up(struct semaphore *sem)
54{
55 wake_up(&sem->wait);
56}
57
58static DEFINE_SPINLOCK(semaphore_lock);
59
60void __sched __down(struct semaphore * sem)
61{
62 struct task_struct *tsk = current;
63 DECLARE_WAITQUEUE(wait, tsk);
64 tsk->state = TASK_UNINTERRUPTIBLE;
65 add_wait_queue_exclusive(&sem->wait, &wait);
66
67 spin_lock_irq(&semaphore_lock);
68 sem->sleepers++;
69 for (;;) {
70 int sleepers = sem->sleepers;
71
72 /*
73 * Add "everybody else" into it. They aren't
74 * playing, because we own the spinlock.
75 */
76 if (!atomic_add_negative(sleepers - 1, &sem->count)) {
77 sem->sleepers = 0;
78 break;
79 }
80 sem->sleepers = 1; /* us - see -1 above */
81 spin_unlock_irq(&semaphore_lock);
82
83 schedule();
84 tsk->state = TASK_UNINTERRUPTIBLE;
85 spin_lock_irq(&semaphore_lock);
86 }
87 spin_unlock_irq(&semaphore_lock);
88 remove_wait_queue(&sem->wait, &wait);
89 tsk->state = TASK_RUNNING;
90 wake_up(&sem->wait);
91}
92
93int __sched __down_interruptible(struct semaphore * sem)
94{
95 int retval = 0;
96 struct task_struct *tsk = current;
97 DECLARE_WAITQUEUE(wait, tsk);
98 tsk->state = TASK_INTERRUPTIBLE;
99 add_wait_queue_exclusive(&sem->wait, &wait);
100
101 spin_lock_irq(&semaphore_lock);
102 sem->sleepers ++;
103 for (;;) {
104 int sleepers = sem->sleepers;
105
106 /*
107 * With signals pending, this turns into
108 * the trylock failure case - we won't be
109 * sleeping, and we can't get the lock as
110 * it has contention. Just correct the count
111 * and exit.
112 */
113 if (signal_pending(current)) {
114 retval = -EINTR;
115 sem->sleepers = 0;
116 atomic_add(sleepers, &sem->count);
117 break;
118 }
119
120 /*
121 * Add "everybody else" into it. They aren't
122 * playing, because we own the spinlock. The
123 * "-1" is because we're still hoping to get
124 * the lock.
125 */
126 if (!atomic_add_negative(sleepers - 1, &sem->count)) {
127 sem->sleepers = 0;
128 break;
129 }
130 sem->sleepers = 1; /* us - see -1 above */
131 spin_unlock_irq(&semaphore_lock);
132
133 schedule();
134 tsk->state = TASK_INTERRUPTIBLE;
135 spin_lock_irq(&semaphore_lock);
136 }
137 spin_unlock_irq(&semaphore_lock);
138 tsk->state = TASK_RUNNING;
139 remove_wait_queue(&sem->wait, &wait);
140 wake_up(&sem->wait);
141 return retval;
142}
143
144/*
145 * Trylock failed - make sure we correct for
146 * having decremented the count.
147 */
148int __down_trylock(struct semaphore * sem)
149{
150 unsigned long flags;
151 int sleepers;
152
153 spin_lock_irqsave(&semaphore_lock, flags);
154 sleepers = sem->sleepers + 1;
155 sem->sleepers = 0;
156
157 /*
158 * Add "everybody else" and us into it. They aren't
159 * playing, because we own the spinlock.
160 */
161 if (!atomic_add_negative(sleepers, &sem->count))
162 wake_up(&sem->wait);
163
164 spin_unlock_irqrestore(&semaphore_lock, flags);
165 return 1;
166}
diff --git a/arch/v850/kernel/v850_ksyms.c b/arch/v850/kernel/v850_ksyms.c
index 93575fdc874d..8d386a5dbc4a 100644
--- a/arch/v850/kernel/v850_ksyms.c
+++ b/arch/v850/kernel/v850_ksyms.c
@@ -11,7 +11,6 @@
 #include <asm/pgalloc.h>
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <asm/semaphore.h>
 #include <asm/checksum.h>
 #include <asm/current.h>
 
@@ -34,12 +33,6 @@ EXPORT_SYMBOL (memset);
 EXPORT_SYMBOL (memcpy);
 EXPORT_SYMBOL (memmove);
 
-/* semaphores */
-EXPORT_SYMBOL (__down);
-EXPORT_SYMBOL (__down_interruptible);
-EXPORT_SYMBOL (__down_trylock);
-EXPORT_SYMBOL (__up);
-
 /*
  * libgcc functions - functions that are used internally by the
  * compiler... (prototypes are not correct though, but that
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6c70fed0f9a0..e4b38861ea52 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -53,9 +53,6 @@ config STACKTRACE_SUPPORT
 config HAVE_LATENCYTOP_SUPPORT
 	def_bool y
 
-config SEMAPHORE_SLEEPERS
-	def_bool y
-
 config FAST_CMPXCHG_LOCAL
 	bool
 	default y
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
index 061627806a2d..deb43785e923 100644
--- a/arch/x86/kernel/i386_ksyms_32.c
+++ b/arch/x86/kernel/i386_ksyms_32.c
@@ -1,13 +1,8 @@
 #include <linux/module.h>
-#include <asm/semaphore.h>
 #include <asm/checksum.h>
 #include <asm/desc.h>
 #include <asm/pgtable.h>
 
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_failed_interruptible);
-EXPORT_SYMBOL(__down_failed_trylock);
-EXPORT_SYMBOL(__up_wakeup);
 /* Networking helper routines. */
 EXPORT_SYMBOL(csum_partial_copy_generic);
 
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index a66e9c1a0537..95a993e18165 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -4,7 +4,6 @@
 #include <linux/module.h>
 #include <linux/smp.h>
 
-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -12,11 +11,6 @@
 
 EXPORT_SYMBOL(kernel_thread);
 
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_failed_interruptible);
-EXPORT_SYMBOL(__down_failed_trylock);
-EXPORT_SYMBOL(__up_wakeup);
-
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
 EXPORT_SYMBOL(__get_user_4);
diff --git a/arch/x86/lib/semaphore_32.S b/arch/x86/lib/semaphore_32.S
index 3899bd37fdf0..648fe4741782 100644
--- a/arch/x86/lib/semaphore_32.S
+++ b/arch/x86/lib/semaphore_32.S
@@ -30,89 +30,6 @@
  * value or just clobbered..
  */
 	.section .sched.text, "ax"
-ENTRY(__down_failed)
-	CFI_STARTPROC
-	FRAME
-	pushl %edx
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET edx,0
-	pushl %ecx
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET ecx,0
-	call __down
-	popl %ecx
-	CFI_ADJUST_CFA_OFFSET -4
-	CFI_RESTORE ecx
-	popl %edx
-	CFI_ADJUST_CFA_OFFSET -4
-	CFI_RESTORE edx
-	ENDFRAME
-	ret
-	CFI_ENDPROC
-	ENDPROC(__down_failed)
-
-ENTRY(__down_failed_interruptible)
-	CFI_STARTPROC
-	FRAME
-	pushl %edx
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET edx,0
-	pushl %ecx
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET ecx,0
-	call __down_interruptible
-	popl %ecx
-	CFI_ADJUST_CFA_OFFSET -4
-	CFI_RESTORE ecx
-	popl %edx
-	CFI_ADJUST_CFA_OFFSET -4
-	CFI_RESTORE edx
-	ENDFRAME
-	ret
-	CFI_ENDPROC
-	ENDPROC(__down_failed_interruptible)
-
-ENTRY(__down_failed_trylock)
-	CFI_STARTPROC
-	FRAME
-	pushl %edx
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET edx,0
-	pushl %ecx
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET ecx,0
-	call __down_trylock
-	popl %ecx
-	CFI_ADJUST_CFA_OFFSET -4
-	CFI_RESTORE ecx
-	popl %edx
-	CFI_ADJUST_CFA_OFFSET -4
-	CFI_RESTORE edx
-	ENDFRAME
-	ret
-	CFI_ENDPROC
-	ENDPROC(__down_failed_trylock)
-
-ENTRY(__up_wakeup)
-	CFI_STARTPROC
-	FRAME
-	pushl %edx
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET edx,0
-	pushl %ecx
-	CFI_ADJUST_CFA_OFFSET 4
-	CFI_REL_OFFSET ecx,0
-	call __up
-	popl %ecx
-	CFI_ADJUST_CFA_OFFSET -4
-	CFI_RESTORE ecx
-	popl %edx
-	CFI_ADJUST_CFA_OFFSET -4
-	CFI_RESTORE edx
-	ENDFRAME
-	ret
-	CFI_ENDPROC
-	ENDPROC(__up_wakeup)
 
 /*
  * rw spinlock fallbacks
diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
index 8b92d428ab02..e009251d4e9f 100644
--- a/arch/x86/lib/thunk_64.S
+++ b/arch/x86/lib/thunk_64.S
@@ -41,11 +41,6 @@
 	thunk rwsem_downgrade_thunk,rwsem_downgrade_wake
 #endif
 
-	thunk __down_failed,__down
-	thunk_retrax __down_failed_interruptible,__down_interruptible
-	thunk_retrax __down_failed_trylock,__down_trylock
-	thunk __up_wakeup,__up
-
 #ifdef CONFIG_TRACE_IRQFLAGS
 	thunk trace_hardirqs_on_thunk,trace_hardirqs_on
 	thunk trace_hardirqs_off_thunk,trace_hardirqs_off
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index f582d6a24ec2..7419dbccf027 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -5,7 +5,7 @@
 extra-y := head.o vmlinux.lds
 
 
-obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o semaphore.o \
+obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o \
 	 setup.o signal.o syscall.o time.o traps.o vectors.o platform.o \
 	 pci-dma.o init_task.o io.o
 
diff --git a/arch/xtensa/kernel/semaphore.c b/arch/xtensa/kernel/semaphore.c
deleted file mode 100644
index 995c6410ae10..000000000000
--- a/arch/xtensa/kernel/semaphore.c
+++ /dev/null
@@ -1,226 +0,0 @@
1/*
2 * arch/xtensa/kernel/semaphore.c
3 *
4 * Generic semaphore code. Buyer beware. Do your own specific changes
5 * in <asm/semaphore-helper.h>
6 *
7 * This file is subject to the terms and conditions of the GNU General Public
8 * License. See the file "COPYING" in the main directory of this archive
9 * for more details.
10 *
11 * Copyright (C) 2001 - 2005 Tensilica Inc.
12 *
13 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
14 * Chris Zankel <chris@zankel.net>
15 * Marc Gauthier<marc@tensilica.com, marc@alumni.uwaterloo.ca>
16 * Kevin Chea
17 */
18
19#include <linux/sched.h>
20#include <linux/wait.h>
21#include <linux/init.h>
22#include <asm/semaphore.h>
23#include <asm/errno.h>
24
25/*
26 * These two _must_ execute atomically wrt each other.
27 */
28
29static __inline__ void wake_one_more(struct semaphore * sem)
30{
31 atomic_inc((atomic_t *)&sem->sleepers);
32}
33
34static __inline__ int waking_non_zero(struct semaphore *sem)
35{
36 unsigned long flags;
37 int ret = 0;
38
39 spin_lock_irqsave(&semaphore_wake_lock, flags);
40 if (sem->sleepers > 0) {
41 sem->sleepers--;
42 ret = 1;
43 }
44 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
45 return ret;
46}
47
48/*
49 * waking_non_zero_interruptible:
50 * 1 got the lock
51 * 0 go to sleep
52 * -EINTR interrupted
53 *
54 * We must undo the sem->count down_interruptible() increment while we are
55 * protected by the spinlock in order to make this atomic_inc() atomic with the
56 * atomic_read() in wake_one_more(), otherwise we can race. -arca
57 */
58
59static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
60 struct task_struct *tsk)
61{
62 unsigned long flags;
63 int ret = 0;
64
65 spin_lock_irqsave(&semaphore_wake_lock, flags);
66 if (sem->sleepers > 0) {
67 sem->sleepers--;
68 ret = 1;
69 } else if (signal_pending(tsk)) {
70 atomic_inc(&sem->count);
71 ret = -EINTR;
72 }
73 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
74 return ret;
75}
76
77/*
78 * waking_non_zero_trylock:
79 * 1 failed to lock
80 * 0 got the lock
81 *
82 * We must undo the sem->count down_trylock() increment while we are
83 * protected by the spinlock in order to make this atomic_inc() atomic with the
84 * atomic_read() in wake_one_more(), otherwise we can race. -arca
85 */
86
87static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
88{
89 unsigned long flags;
90 int ret = 1;
91
92 spin_lock_irqsave(&semaphore_wake_lock, flags);
93 if (sem->sleepers <= 0)
94 atomic_inc(&sem->count);
95 else {
96 sem->sleepers--;
97 ret = 0;
98 }
99 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
100 return ret;
101}
102
103DEFINE_SPINLOCK(semaphore_wake_lock);
104
105/*
106 * Semaphores are implemented using a two-way counter:
107 * The "count" variable is decremented for each process
108 * that tries to sleep, while the "waking" variable is
109 * incremented when the "up()" code goes to wake up waiting
110 * processes.
111 *
112 * Notably, the inline "up()" and "down()" functions can
113 * efficiently test if they need to do any extra work (up
114 * needs to do something only if count was negative before
115 * the increment operation).
116 *
117 * waking_non_zero() (from asm/semaphore.h) must execute
118 * atomically.
119 *
120 * When __up() is called, the count was negative before
121 * incrementing it, and we need to wake up somebody.
122 *
123 * This routine adds one to the count of processes that need to
124 * wake up and exit. ALL waiting processes actually wake up but
125 * only the one that gets to the "waking" field first will gate
126 * through and acquire the semaphore. The others will go back
127 * to sleep.
128 *
129 * Note that these functions are only called when there is
130 * contention on the lock, and as such all this is the
131 * "non-critical" part of the whole semaphore business. The
132 * critical part is the inline stuff in <asm/semaphore.h>
133 * where we want to avoid any extra jumps and calls.
134 */
135
136void __up(struct semaphore *sem)
137{
138 wake_one_more(sem);
139 wake_up(&sem->wait);
140}
141
142/*
143 * Perform the "down" function. Return zero for semaphore acquired,
144 * return negative for signalled out of the function.
145 *
146 * If called from __down, the return is ignored and the wait loop is
147 * not interruptible. This means that a task waiting on a semaphore
148 * using "down()" cannot be killed until someone does an "up()" on
149 * the semaphore.
150 *
151 * If called from __down_interruptible, the return value gets checked
152 * upon return. If the return value is negative then the task continues
153 * with the negative value in the return register (it can be tested by
154 * the caller).
155 *
156 * Either form may be used in conjunction with "up()".
157 *
158 */
159
160#define DOWN_VAR \
161 struct task_struct *tsk = current; \
162 wait_queue_t wait; \
163 init_waitqueue_entry(&wait, tsk);
164
165#define DOWN_HEAD(task_state) \
166 \
167 \
168 tsk->state = (task_state); \
169 add_wait_queue(&sem->wait, &wait); \
170 \
171 /* \
172 * Ok, we're set up. sem->count is known to be less than zero \
173 * so we must wait. \
174 * \
175 * We can let go the lock for purposes of waiting. \
176 * We re-acquire it after awaking so as to protect \
177 * all semaphore operations. \
178 * \
179 * If "up()" is called before we call waking_non_zero() then \
180 * we will catch it right away. If it is called later then \
181 * we will have to go through a wakeup cycle to catch it. \
182 * \
183 * Multiple waiters contend for the semaphore lock to see \
184 * who gets to gate through and who has to wait some more. \
185 */ \
186 for (;;) {
187
188#define DOWN_TAIL(task_state) \
189 tsk->state = (task_state); \
190 } \
191 tsk->state = TASK_RUNNING; \
192 remove_wait_queue(&sem->wait, &wait);
193
194void __sched __down(struct semaphore * sem)
195{
196 DOWN_VAR
197 DOWN_HEAD(TASK_UNINTERRUPTIBLE)
198 if (waking_non_zero(sem))
199 break;
200 schedule();
201 DOWN_TAIL(TASK_UNINTERRUPTIBLE)
202}
203
204int __sched __down_interruptible(struct semaphore * sem)
205{
206 int ret = 0;
207 DOWN_VAR
208 DOWN_HEAD(TASK_INTERRUPTIBLE)
209
210 ret = waking_non_zero_interruptible(sem, tsk);
211 if (ret)
212 {
213 if (ret == 1)
214 /* ret != 0 only if we get interrupted -arca */
215 ret = 0;
216 break;
217 }
218 schedule();
219 DOWN_TAIL(TASK_INTERRUPTIBLE)
220 return ret;
221}
222
223int __down_trylock(struct semaphore * sem)
224{
225 return waking_non_zero_trylock(sem);
226}
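
The DOWN_VAR/DOWN_HEAD/DOWN_TAIL macros above obscure the control flow.
Expanded by hand, __down() is just the following loop -- shown purely as a
reading aid; this is the deleted code's own logic, not new code:

void __sched __down(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	wait_queue_t wait;

	init_waitqueue_entry(&wait, tsk);
	tsk->state = TASK_UNINTERRUPTIBLE;
	add_wait_queue(&sem->wait, &wait);

	for (;;) {
		if (waking_non_zero(sem))	/* consumed a wakeup token */
			break;
		schedule();
		tsk->state = TASK_UNINTERRUPTIBLE;
	}
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}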
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
index 60dbdb43fb4c..6e52cdd6166f 100644
--- a/arch/xtensa/kernel/xtensa_ksyms.c
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -26,7 +26,6 @@
 #include <asm/io.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
-#include <asm/semaphore.h>
 #ifdef CONFIG_BLK_DEV_FD
 #include <asm/floppy.h>
 #endif
@@ -71,14 +70,6 @@ EXPORT_SYMBOL(__umodsi3);
 EXPORT_SYMBOL(__udivdi3);
 EXPORT_SYMBOL(__umoddi3);
 
-/*
- * Semaphore operations
- */
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__up);
-
 #ifdef CONFIG_NET
 /*
  * Networking support
diff --git a/include/asm-alpha/semaphore.h b/include/asm-alpha/semaphore.h
index f1e9278a9fe2..d9b2034ed1d2 100644
--- a/include/asm-alpha/semaphore.h
+++ b/include/asm-alpha/semaphore.h
@@ -1,149 +1 @@
1#ifndef _ALPHA_SEMAPHORE_H
+#include <linux/semaphore.h>
2#define _ALPHA_SEMAPHORE_H
3
4/*
5 * SMP- and interrupt-safe semaphores..
6 *
7 * (C) Copyright 1996 Linus Torvalds
8 * (C) Copyright 1996, 2000 Richard Henderson
9 */
10
11#include <asm/current.h>
12#include <asm/system.h>
13#include <asm/atomic.h>
14#include <linux/compiler.h>
15#include <linux/wait.h>
16#include <linux/rwsem.h>
17
18struct semaphore {
19 atomic_t count;
20 wait_queue_head_t wait;
21};
22
23#define __SEMAPHORE_INITIALIZER(name, n) \
24{ \
25 .count = ATOMIC_INIT(n), \
26 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
27}
28
29#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
30 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
31
32#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
33
34static inline void sema_init(struct semaphore *sem, int val)
35{
36 /*
37 * Logically,
38 * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
39 * except that gcc still produces better code when initializing the parts separately.
40 */
41
42 atomic_set(&sem->count, val);
43 init_waitqueue_head(&sem->wait);
44}
45
46static inline void init_MUTEX (struct semaphore *sem)
47{
48 sema_init(sem, 1);
49}
50
51static inline void init_MUTEX_LOCKED (struct semaphore *sem)
52{
53 sema_init(sem, 0);
54}
55
56extern void down(struct semaphore *);
57extern void __down_failed(struct semaphore *);
58extern int down_interruptible(struct semaphore *);
59extern int __down_failed_interruptible(struct semaphore *);
60extern int down_trylock(struct semaphore *);
61extern void up(struct semaphore *);
62extern void __up_wakeup(struct semaphore *);
63
64/*
65 * Hidden out of line code is fun, but extremely messy. Rely on newer
66 * compilers to do a respectable job with this. The contention cases
67 * are handled out of line in arch/alpha/kernel/semaphore.c.
68 */
69
70static inline void __down(struct semaphore *sem)
71{
72 long count;
73 might_sleep();
74 count = atomic_dec_return(&sem->count);
75 if (unlikely(count < 0))
76 __down_failed(sem);
77}
78
79static inline int __down_interruptible(struct semaphore *sem)
80{
81 long count;
82 might_sleep();
83 count = atomic_dec_return(&sem->count);
84 if (unlikely(count < 0))
85 return __down_failed_interruptible(sem);
86 return 0;
87}
88
89/*
90 * down_trylock returns 0 on success, 1 if we failed to get the lock.
91 */
92
93static inline int __down_trylock(struct semaphore *sem)
94{
95 long ret;
96
97 /* "Equivalent" C:
98
99 do {
100 ret = ldl_l;
101 --ret;
102 if (ret < 0)
103 break;
104 ret = stl_c = ret;
105 } while (ret == 0);
106 */
107 __asm__ __volatile__(
108 "1: ldl_l %0,%1\n"
109 " subl %0,1,%0\n"
110 " blt %0,2f\n"
111 " stl_c %0,%1\n"
112 " beq %0,3f\n"
113 " mb\n"
114 "2:\n"
115 ".subsection 2\n"
116 "3: br 1b\n"
117 ".previous"
118 : "=&r" (ret), "=m" (sem->count)
119 : "m" (sem->count));
120
121 return ret < 0;
122}
123
124static inline void __up(struct semaphore *sem)
125{
126 if (unlikely(atomic_inc_return(&sem->count) <= 0))
127 __up_wakeup(sem);
128}
129
130#if !defined(CONFIG_DEBUG_SEMAPHORE)
131extern inline void down(struct semaphore *sem)
132{
133 __down(sem);
134}
135extern inline int down_interruptible(struct semaphore *sem)
136{
137 return __down_interruptible(sem);
138}
139extern inline int down_trylock(struct semaphore *sem)
140{
141 return __down_trylock(sem);
142}
143extern inline void up(struct semaphore *sem)
144{
145 __up(sem);
146}
147#endif
148
149#endif
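
Since each of the following headers collapses to a single #include, it is
worth keeping in mind the calling convention they all implemented (and which
the generic version preserves). A minimal usage sketch; the function and
semaphore names here are made up:

static DECLARE_MUTEX(example_sem);	/* semaphore with count == 1 */

static void example(void)
{
	down(&example_sem);		/* may sleep, not interruptible */
	/* ... critical section ... */
	up(&example_sem);

	if (down_interruptible(&example_sem) == 0) {
		/* got it; -EINTR would mean a signal arrived instead */
		up(&example_sem);
	}

	if (down_trylock(&example_sem) == 0) {
		/* 0 means acquired -- inverted w.r.t. most try-operations */
		up(&example_sem);
	}
}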
diff --git a/include/asm-arm/semaphore-helper.h b/include/asm-arm/semaphore-helper.h
deleted file mode 100644
index 1d7f1987edb9..000000000000
--- a/include/asm-arm/semaphore-helper.h
+++ /dev/null
@@ -1,84 +0,0 @@
1#ifndef ASMARM_SEMAPHORE_HELPER_H
2#define ASMARM_SEMAPHORE_HELPER_H
3
4/*
5 * These two _must_ execute atomically wrt each other.
6 */
7static inline void wake_one_more(struct semaphore * sem)
8{
9 unsigned long flags;
10
11 spin_lock_irqsave(&semaphore_wake_lock, flags);
12 if (atomic_read(&sem->count) <= 0)
13 sem->waking++;
14 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
15}
16
17static inline int waking_non_zero(struct semaphore *sem)
18{
19 unsigned long flags;
20 int ret = 0;
21
22 spin_lock_irqsave(&semaphore_wake_lock, flags);
23 if (sem->waking > 0) {
24 sem->waking--;
25 ret = 1;
26 }
27 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
28 return ret;
29}
30
31/*
32 * waking non zero interruptible
33 * 1 got the lock
34 * 0 go to sleep
35 * -EINTR interrupted
36 *
37 * We must undo the sem->count down_interruptible() increment while we are
38 * protected by the spinlock in order to make this atomic_inc() atomic with the
39 * atomic_read() in wake_one_more(), otherwise we can race. -arca
40 */
41static inline int waking_non_zero_interruptible(struct semaphore *sem,
42 struct task_struct *tsk)
43{
44 unsigned long flags;
45 int ret = 0;
46
47 spin_lock_irqsave(&semaphore_wake_lock, flags);
48 if (sem->waking > 0) {
49 sem->waking--;
50 ret = 1;
51 } else if (signal_pending(tsk)) {
52 atomic_inc(&sem->count);
53 ret = -EINTR;
54 }
55 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
56 return ret;
57}
58
59/*
60 * waking_non_zero_try_lock:
61 * 1 failed to lock
62 * 0 got the lock
63 *
64 * We must undo the sem->count down_interruptible() increment while we are
65 * protected by the spinlock in order to make this atomic_inc() atomic with the
66 * atomic_read() in wake_one_more(), otherwise we can race. -arca
67 */
68static inline int waking_non_zero_trylock(struct semaphore *sem)
69{
70 unsigned long flags;
71 int ret = 1;
72
73 spin_lock_irqsave(&semaphore_wake_lock, flags);
74 if (sem->waking <= 0)
75 atomic_inc(&sem->count);
76 else {
77 sem->waking--;
78 ret = 0;
79 }
80 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
81 return ret;
82}
83
84#endif
diff --git a/include/asm-arm/semaphore.h b/include/asm-arm/semaphore.h
index 1c8b441f89e3..d9b2034ed1d2 100644
--- a/include/asm-arm/semaphore.h
+++ b/include/asm-arm/semaphore.h
@@ -1,98 +1 @@
1/*
+#include <linux/semaphore.h>
2 * linux/include/asm-arm/semaphore.h
3 */
4#ifndef __ASM_ARM_SEMAPHORE_H
5#define __ASM_ARM_SEMAPHORE_H
6
7#include <linux/linkage.h>
8#include <linux/spinlock.h>
9#include <linux/wait.h>
10#include <linux/rwsem.h>
11
12#include <asm/atomic.h>
13#include <asm/locks.h>
14
15struct semaphore {
16 atomic_t count;
17 int sleepers;
18 wait_queue_head_t wait;
19};
20
21#define __SEMAPHORE_INIT(name, cnt) \
22{ \
23 .count = ATOMIC_INIT(cnt), \
24 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
25}
26
27#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
28 struct semaphore name = __SEMAPHORE_INIT(name,count)
29
30#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
31
32static inline void sema_init(struct semaphore *sem, int val)
33{
34 atomic_set(&sem->count, val);
35 sem->sleepers = 0;
36 init_waitqueue_head(&sem->wait);
37}
38
39static inline void init_MUTEX(struct semaphore *sem)
40{
41 sema_init(sem, 1);
42}
43
44static inline void init_MUTEX_LOCKED(struct semaphore *sem)
45{
46 sema_init(sem, 0);
47}
48
49/*
50 * special register calling convention
51 */
52asmlinkage void __down_failed(void);
53asmlinkage int __down_interruptible_failed(void);
54asmlinkage int __down_trylock_failed(void);
55asmlinkage void __up_wakeup(void);
56
57extern void __down(struct semaphore * sem);
58extern int __down_interruptible(struct semaphore * sem);
59extern int __down_trylock(struct semaphore * sem);
60extern void __up(struct semaphore * sem);
61
62/*
63 * This is ugly, but we want the default case to fall through.
64 * "__down" is the actual routine that waits...
65 */
66static inline void down(struct semaphore * sem)
67{
68 might_sleep();
69 __down_op(sem, __down_failed);
70}
71
72/*
73 * This is ugly, but we want the default case to fall through.
74 * "__down_interruptible" is the actual routine that waits...
75 */
76static inline int down_interruptible (struct semaphore * sem)
77{
78 might_sleep();
79 return __down_op_ret(sem, __down_interruptible_failed);
80}
81
82static inline int down_trylock(struct semaphore *sem)
83{
84 return __down_op_ret(sem, __down_trylock_failed);
85}
86
87/*
88 * Note! This is subtle. We jump to wake people up only if
89 * the semaphore was negative (== somebody was waiting on it).
90 * The default case (no contention) will result in NO
91 * jumps for both down() and up().
92 */
93static inline void up(struct semaphore * sem)
94{
95 __up_op(sem, __up_wakeup);
96}
97
98#endif
diff --git a/include/asm-avr32/semaphore.h b/include/asm-avr32/semaphore.h
index feaf1d453386..d9b2034ed1d2 100644
--- a/include/asm-avr32/semaphore.h
+++ b/include/asm-avr32/semaphore.h
@@ -1,108 +1 @@
1/*
+#include <linux/semaphore.h>
2 * SMP- and interrupt-safe semaphores.
3 *
4 * Copyright (C) 2006 Atmel Corporation
5 *
6 * Based on include/asm-i386/semaphore.h
7 * Copyright (C) 1996 Linus Torvalds
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13#ifndef __ASM_AVR32_SEMAPHORE_H
14#define __ASM_AVR32_SEMAPHORE_H
15
16#include <linux/linkage.h>
17
18#include <asm/system.h>
19#include <asm/atomic.h>
20#include <linux/wait.h>
21#include <linux/rwsem.h>
22
23struct semaphore {
24 atomic_t count;
25 int sleepers;
26 wait_queue_head_t wait;
27};
28
29#define __SEMAPHORE_INITIALIZER(name, n) \
30{ \
31 .count = ATOMIC_INIT(n), \
32 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
33}
34
35#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
36 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
37
38#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
39
40static inline void sema_init (struct semaphore *sem, int val)
41{
42 atomic_set(&sem->count, val);
43 sem->sleepers = 0;
44 init_waitqueue_head(&sem->wait);
45}
46
47static inline void init_MUTEX (struct semaphore *sem)
48{
49 sema_init(sem, 1);
50}
51
52static inline void init_MUTEX_LOCKED (struct semaphore *sem)
53{
54 sema_init(sem, 0);
55}
56
57void __down(struct semaphore * sem);
58int __down_interruptible(struct semaphore * sem);
59void __up(struct semaphore * sem);
60
61/*
62 * This is ugly, but we want the default case to fall through.
63 * "__down_failed" is a special asm handler that calls the C
64 * routine that actually waits. See arch/i386/kernel/semaphore.c
65 */
66static inline void down(struct semaphore * sem)
67{
68 might_sleep();
69 if (unlikely(atomic_dec_return (&sem->count) < 0))
70 __down (sem);
71}
72
73/*
74 * Interruptibly try to acquire a semaphore. If we obtained
75 * it, return zero. If we were interrupted, return -EINTR.
76 */
77static inline int down_interruptible(struct semaphore * sem)
78{
79 int ret = 0;
80
81 might_sleep();
82 if (unlikely(atomic_dec_return (&sem->count) < 0))
83 ret = __down_interruptible (sem);
84 return ret;
85}
86
87/*
88 * Non-blockingly attempt to down() a semaphore.
89 * Returns zero if we acquired it
90 */
91static inline int down_trylock(struct semaphore * sem)
92{
93 return atomic_dec_if_positive(&sem->count) < 0;
94}
95
96/*
97 * Note! This is subtle. We jump to wake people up only if
98 * the semaphore was negative (== somebody was waiting on it).
99 * The default case (no contention) will result in NO
100 * jumps for both down() and up().
101 */
102static inline void up(struct semaphore * sem)
103{
104 if (unlikely(atomic_inc_return (&sem->count) <= 0))
105 __up (sem);
106}
107
108#endif /*__ASM_AVR32_SEMAPHORE_H */
diff --git a/include/asm-blackfin/semaphore-helper.h b/include/asm-blackfin/semaphore-helper.h
deleted file mode 100644
index 9082b0dc3eb5..000000000000
--- a/include/asm-blackfin/semaphore-helper.h
+++ /dev/null
@@ -1,82 +0,0 @@
1/* Based on M68K version, Lineo Inc. May 2001 */
2
3#ifndef _BFIN_SEMAPHORE_HELPER_H
4#define _BFIN_SEMAPHORE_HELPER_H
5
6/*
7 * SMP- and interrupt-safe semaphores helper functions.
8 *
9 * (C) Copyright 1996 Linus Torvalds
10 *
11 */
12
13#include <asm/errno.h>
14
15/*
16 * These two _must_ execute atomically wrt each other.
17 */
18static inline void wake_one_more(struct semaphore *sem)
19{
20 atomic_inc(&sem->waking);
21}
22
23static inline int waking_non_zero(struct semaphore *sem)
24{
25 int ret;
26 unsigned long flags = 0;
27
28 spin_lock_irqsave(&semaphore_wake_lock, flags);
29 ret = 0;
30 if (atomic_read(&sem->waking) > 0) {
31 atomic_dec(&sem->waking);
32 ret = 1;
33 }
34 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
35 return ret;
36}
37
38/*
39 * waking_non_zero_interruptible:
40 * 1 got the lock
41 * 0 go to sleep
42 * -EINTR interrupted
43 */
44static inline int waking_non_zero_interruptible(struct semaphore *sem,
45 struct task_struct *tsk)
46{
47 int ret = 0;
48 unsigned long flags = 0;
49
50 spin_lock_irqsave(&semaphore_wake_lock, flags);
51 if (atomic_read(&sem->waking) > 0) {
52 atomic_dec(&sem->waking);
53 ret = 1;
54 } else if (signal_pending(tsk)) {
55 atomic_inc(&sem->count);
56 ret = -EINTR;
57 }
58 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
59 return ret;
60}
61
62/*
63 * waking_non_zero_trylock:
64 * 1 failed to lock
65 * 0 got the lock
66 */
67static inline int waking_non_zero_trylock(struct semaphore *sem)
68{
69 int ret = 1;
70 unsigned long flags = 0;
71
72 spin_lock_irqsave(&semaphore_wake_lock, flags);
73 if (atomic_read(&sem->waking) > 0) {
74 atomic_dec(&sem->waking);
75 ret = 0;
76 } else
77 atomic_inc(&sem->count);
78 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
79 return ret;
80}
81
82#endif /* _BFIN_SEMAPHORE_HELPER_H */
diff --git a/include/asm-blackfin/semaphore.h b/include/asm-blackfin/semaphore.h
index 533f90fb2e4e..d9b2034ed1d2 100644
--- a/include/asm-blackfin/semaphore.h
+++ b/include/asm-blackfin/semaphore.h
@@ -1,105 +1 @@
1#ifndef _BFIN_SEMAPHORE_H
+#include <linux/semaphore.h>
2#define _BFIN_SEMAPHORE_H
3
4#ifndef __ASSEMBLY__
5
6#include <linux/linkage.h>
7#include <linux/wait.h>
8#include <linux/spinlock.h>
9#include <linux/rwsem.h>
10#include <asm/atomic.h>
11
12/*
13 * Interrupt-safe semaphores..
14 *
15 * (C) Copyright 1996 Linus Torvalds
16 *
17 * BFIN version by akbar hussain Lineo Inc April 2001
18 *
19 */
20
21struct semaphore {
22 atomic_t count;
23 int sleepers;
24 wait_queue_head_t wait;
25};
26
27#define __SEMAPHORE_INITIALIZER(name, n) \
28{ \
29 .count = ATOMIC_INIT(n), \
30 .sleepers = 0, \
31 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
32}
33
34#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
35 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
36
37#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
38
39static inline void sema_init(struct semaphore *sem, int val)
40{
41 *sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
42}
43
44static inline void init_MUTEX(struct semaphore *sem)
45{
46 sema_init(sem, 1);
47}
48
49static inline void init_MUTEX_LOCKED(struct semaphore *sem)
50{
51 sema_init(sem, 0);
52}
53
54asmlinkage void __down(struct semaphore *sem);
55asmlinkage int __down_interruptible(struct semaphore *sem);
56asmlinkage int __down_trylock(struct semaphore *sem);
57asmlinkage void __up(struct semaphore *sem);
58
59extern spinlock_t semaphore_wake_lock;
60
61/*
62 * This is ugly, but we want the default case to fall through.
63 * "down_failed" is a special asm handler that calls the C
64 * routine that actually waits.
65 */
66static inline void down(struct semaphore *sem)
67{
68 might_sleep();
69 if (atomic_dec_return(&sem->count) < 0)
70 __down(sem);
71}
72
73static inline int down_interruptible(struct semaphore *sem)
74{
75 int ret = 0;
76
77 might_sleep();
78 if (atomic_dec_return(&sem->count) < 0)
79 ret = __down_interruptible(sem);
80 return (ret);
81}
82
83static inline int down_trylock(struct semaphore *sem)
84{
85 int ret = 0;
86
87 if (atomic_dec_return(&sem->count) < 0)
88 ret = __down_trylock(sem);
89 return ret;
90}
91
92/*
93 * Note! This is subtle. We jump to wake people up only if
94 * the semaphore was negative (== somebody was waiting on it).
95 * The default case (no contention) will result in NO
96 * jumps for both down() and up().
97 */
98static inline void up(struct semaphore *sem)
99{
100 if (atomic_inc_return(&sem->count) <= 0)
101 __up(sem);
102}
103
104#endif /* __ASSEMBLY__ */
105#endif /* _BFIN_SEMAPHORE_H */
diff --git a/include/asm-cris/semaphore-helper.h b/include/asm-cris/semaphore-helper.h
deleted file mode 100644
index 27bfeca1b981..000000000000
--- a/include/asm-cris/semaphore-helper.h
+++ /dev/null
@@ -1,78 +0,0 @@
1/* $Id: semaphore-helper.h,v 1.3 2001/03/26 15:00:33 orjanf Exp $
2 *
3 * SMP- and interrupt-safe semaphores helper functions. Generic versions, no
4 * optimizations whatsoever...
5 *
6 */
7
8#ifndef _ASM_SEMAPHORE_HELPER_H
9#define _ASM_SEMAPHORE_HELPER_H
10
11#include <asm/atomic.h>
12#include <linux/errno.h>
13
14#define read(a) ((a)->counter)
15#define inc(a) (((a)->counter)++)
16#define dec(a) (((a)->counter)--)
17
18#define count_inc(a) ((*(a))++)
19
20/*
21 * These two _must_ execute atomically wrt each other.
22 */
23static inline void wake_one_more(struct semaphore * sem)
24{
25 atomic_inc(&sem->waking);
26}
27
28static inline int waking_non_zero(struct semaphore *sem)
29{
30 unsigned long flags;
31 int ret = 0;
32
33 local_irq_save(flags);
34 if (read(&sem->waking) > 0) {
35 dec(&sem->waking);
36 ret = 1;
37 }
38 local_irq_restore(flags);
39 return ret;
40}
41
42static inline int waking_non_zero_interruptible(struct semaphore *sem,
43 struct task_struct *tsk)
44{
45 int ret = 0;
46 unsigned long flags;
47
48 local_irq_save(flags);
49 if (read(&sem->waking) > 0) {
50 dec(&sem->waking);
51 ret = 1;
52 } else if (signal_pending(tsk)) {
53 inc(&sem->count);
54 ret = -EINTR;
55 }
56 local_irq_restore(flags);
57 return ret;
58}
59
60static inline int waking_non_zero_trylock(struct semaphore *sem)
61{
62 int ret = 1;
63 unsigned long flags;
64
65 local_irq_save(flags);
66 if (read(&sem->waking) <= 0)
67 inc(&sem->count);
68 else {
69 dec(&sem->waking);
70 ret = 0;
71 }
72 local_irq_restore(flags);
73 return ret;
74}
75
76#endif /* _ASM_SEMAPHORE_HELPER_H */
77
78
diff --git a/include/asm-cris/semaphore.h b/include/asm-cris/semaphore.h
index 31a4ac448195..d9b2034ed1d2 100644
--- a/include/asm-cris/semaphore.h
+++ b/include/asm-cris/semaphore.h
@@ -1,133 +1 @@
1/* $Id: semaphore.h,v 1.3 2001/05/08 13:54:09 bjornw Exp $ */
+#include <linux/semaphore.h>
2
3/* On the i386 these are coded in asm, perhaps we should as well. Later.. */
4
5#ifndef _CRIS_SEMAPHORE_H
6#define _CRIS_SEMAPHORE_H
7
8#define RW_LOCK_BIAS 0x01000000
9
10#include <linux/wait.h>
11#include <linux/spinlock.h>
12#include <linux/rwsem.h>
13
14#include <asm/system.h>
15#include <asm/atomic.h>
16
17/*
18 * CRIS semaphores, implemented in C-only so far.
19 */
20
21struct semaphore {
22 atomic_t count;
23 atomic_t waking;
24 wait_queue_head_t wait;
25};
26
27#define __SEMAPHORE_INITIALIZER(name, n) \
28{ \
29 .count = ATOMIC_INIT(n), \
30 .waking = ATOMIC_INIT(0), \
31 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
32}
33
34#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
35 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
36
37#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
38
39static inline void sema_init(struct semaphore *sem, int val)
40{
41 *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
42}
43
44static inline void init_MUTEX (struct semaphore *sem)
45{
46 sema_init(sem, 1);
47}
48
49static inline void init_MUTEX_LOCKED (struct semaphore *sem)
50{
51 sema_init(sem, 0);
52}
53
54extern void __down(struct semaphore * sem);
55extern int __down_interruptible(struct semaphore * sem);
56extern int __down_trylock(struct semaphore * sem);
57extern void __up(struct semaphore * sem);
58
59/* notice - we probably can do cli/sti here instead of saving */
60
61static inline void down(struct semaphore * sem)
62{
63 unsigned long flags;
64 int failed;
65
66 might_sleep();
67
68 /* atomically decrement the semaphore's count, and if it's negative, we wait */
69 cris_atomic_save(sem, flags);
70 failed = --(sem->count.counter) < 0;
71 cris_atomic_restore(sem, flags);
72 if(failed) {
73 __down(sem);
74 }
75}
76
77/*
78 * This version waits in interruptible state so that the waiting
79 * process can be killed. The down_interruptible routine
80 * returns negative for signalled and zero for semaphore acquired.
81 */
82
83static inline int down_interruptible(struct semaphore * sem)
84{
85 unsigned long flags;
86 int failed;
87
88 might_sleep();
89
90 /* atomically decrement the semaphore's count, and if it's negative, we wait */
91 cris_atomic_save(sem, flags);
92 failed = --(sem->count.counter) < 0;
93 cris_atomic_restore(sem, flags);
94 if(failed)
95 failed = __down_interruptible(sem);
96 return(failed);
97}
98
99static inline int down_trylock(struct semaphore * sem)
100{
101 unsigned long flags;
102 int failed;
103
104 cris_atomic_save(sem, flags);
105 failed = --(sem->count.counter) < 0;
106 cris_atomic_restore(sem, flags);
107 if(failed)
108 failed = __down_trylock(sem);
109 return(failed);
110
111}
112
113/*
114 * Note! This is subtle. We jump to wake people up only if
115 * the semaphore was negative (== somebody was waiting on it).
116 * The default case (no contention) will result in NO
117 * jumps for both down() and up().
118 */
119static inline void up(struct semaphore * sem)
120{
121 unsigned long flags;
122 int wakeup;
123
124 /* atomically increment the semaphore's count, and if it was negative, we wake people */
125 cris_atomic_save(sem, flags);
126 wakeup = ++(sem->count.counter) <= 0;
127 cris_atomic_restore(sem, flags);
128 if(wakeup) {
129 __up(sem);
130 }
131}
132
133#endif
diff --git a/include/asm-frv/semaphore.h b/include/asm-frv/semaphore.h
index d7aaa1911a1a..d9b2034ed1d2 100644
--- a/include/asm-frv/semaphore.h
+++ b/include/asm-frv/semaphore.h
@@ -1,155 +1 @@
1/* semaphore.h: semaphores for the FR-V
+#include <linux/semaphore.h>
2 *
3 * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#ifndef _ASM_SEMAPHORE_H
12#define _ASM_SEMAPHORE_H
13
14#define RW_LOCK_BIAS 0x01000000
15
16#ifndef __ASSEMBLY__
17
18#include <linux/linkage.h>
19#include <linux/wait.h>
20#include <linux/spinlock.h>
21#include <linux/rwsem.h>
22
23/*
24 * the semaphore definition
25 * - if counter is >0 then there are tokens available on the semaphore for down to collect
26 * - if counter is <=0 then there are no spare tokens, and anyone that wants one must wait
27 * - if wait_list is not empty, then there are processes waiting for the semaphore
28 */
29struct semaphore {
30 unsigned counter;
31 spinlock_t wait_lock;
32 struct list_head wait_list;
33#ifdef CONFIG_DEBUG_SEMAPHORE
34 unsigned __magic;
35#endif
36};
37
38#ifdef CONFIG_DEBUG_SEMAPHORE
39# define __SEM_DEBUG_INIT(name) , (long)&(name).__magic
40#else
41# define __SEM_DEBUG_INIT(name)
42#endif
43
44
45#define __SEMAPHORE_INITIALIZER(name,count) \
46{ count, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __SEM_DEBUG_INIT(name) }
47
48#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
49 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
50
51#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
52
53static inline void sema_init (struct semaphore *sem, int val)
54{
55 *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
56}
57
58static inline void init_MUTEX (struct semaphore *sem)
59{
60 sema_init(sem, 1);
61}
62
63static inline void init_MUTEX_LOCKED (struct semaphore *sem)
64{
65 sema_init(sem, 0);
66}
67
68extern void __down(struct semaphore *sem, unsigned long flags);
69extern int __down_interruptible(struct semaphore *sem, unsigned long flags);
70extern void __up(struct semaphore *sem);
71
72static inline void down(struct semaphore *sem)
73{
74 unsigned long flags;
75
76#ifdef CONFIG_DEBUG_SEMAPHORE
77 CHECK_MAGIC(sem->__magic);
78#endif
79
80 spin_lock_irqsave(&sem->wait_lock, flags);
81 if (likely(sem->counter > 0)) {
82 sem->counter--;
83 spin_unlock_irqrestore(&sem->wait_lock, flags);
84 }
85 else {
86 __down(sem, flags);
87 }
88}
89
90static inline int down_interruptible(struct semaphore *sem)
91{
92 unsigned long flags;
93 int ret = 0;
94
95#ifdef CONFIG_DEBUG_SEMAPHORE
96 CHECK_MAGIC(sem->__magic);
97#endif
98
99 spin_lock_irqsave(&sem->wait_lock, flags);
100 if (likely(sem->counter > 0)) {
101 sem->counter--;
102 spin_unlock_irqrestore(&sem->wait_lock, flags);
103 }
104 else {
105 ret = __down_interruptible(sem, flags);
106 }
107 return ret;
108}
109
110/*
111 * non-blockingly attempt to down() a semaphore.
112 * - returns zero if we acquired it
113 */
114static inline int down_trylock(struct semaphore *sem)
115{
116 unsigned long flags;
117 int success = 0;
118
119#ifdef CONFIG_DEBUG_SEMAPHORE
120 CHECK_MAGIC(sem->__magic);
121#endif
122
123 spin_lock_irqsave(&sem->wait_lock, flags);
124 if (sem->counter > 0) {
125 sem->counter--;
126 success = 1;
127 }
128 spin_unlock_irqrestore(&sem->wait_lock, flags);
129 return !success;
130}
131
132static inline void up(struct semaphore *sem)
133{
134 unsigned long flags;
135
136#ifdef CONFIG_DEBUG_SEMAPHORE
137 CHECK_MAGIC(sem->__magic);
138#endif
139
140 spin_lock_irqsave(&sem->wait_lock, flags);
141 if (!list_empty(&sem->wait_list))
142 __up(sem);
143 else
144 sem->counter++;
145 spin_unlock_irqrestore(&sem->wait_lock, flags);
146}
147
148static inline int sem_getcount(struct semaphore *sem)
149{
150 return sem->counter;
151}
152
153#endif /* __ASSEMBLY__ */
154
155#endif
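
The frv layout above, a plain counter plus a wait_lock and a wait_list, is essentially the shape the new generic implementation adopts. Here is a userspace sketch of the same scheme, with a pthread mutex standing in for wait_lock and a condition variable standing in for the wait_list plus schedule(); note that the real __up() hands the token directly to the first waiter rather than incrementing the count, a refinement this sketch omits:

#include <pthread.h>

/* Sketch under those assumptions; not the kernel implementation. */
struct sem_sketch {
        unsigned int    count;          /* tokens available             */
        pthread_mutex_t lock;           /* plays the role of wait_lock  */
        pthread_cond_t  waiters;        /* plays the role of wait_list  */
};

static struct sem_sketch s = {
        .count   = 1,                   /* a mutex, as with DECLARE_MUTEX */
        .lock    = PTHREAD_MUTEX_INITIALIZER,
        .waiters = PTHREAD_COND_INITIALIZER,
};

static void sketch_down(struct sem_sketch *s)
{
        pthread_mutex_lock(&s->lock);
        while (s->count == 0)                   /* slow path: __down()  */
                pthread_cond_wait(&s->waiters, &s->lock);
        s->count--;                             /* collect a token      */
        pthread_mutex_unlock(&s->lock);
}

static void sketch_up(struct sem_sketch *s)
{
        pthread_mutex_lock(&s->lock);
        s->count++;
        pthread_cond_signal(&s->waiters);       /* slow path: __up()    */
        pthread_mutex_unlock(&s->lock);
}
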
diff --git a/include/asm-h8300/semaphore-helper.h b/include/asm-h8300/semaphore-helper.h
deleted file mode 100644
index 4fea36be5fd8..000000000000
--- a/include/asm-h8300/semaphore-helper.h
+++ /dev/null
@@ -1,85 +0,0 @@
1#ifndef _H8300_SEMAPHORE_HELPER_H
2#define _H8300_SEMAPHORE_HELPER_H
3
4/*
5 * SMP- and interrupt-safe semaphores helper functions.
6 *
7 * (C) Copyright 1996 Linus Torvalds
8 *
9 * based on
10 * m68k version by Andreas Schwab
11 */
12
13#include <linux/errno.h>
14
15/*
16 * These two _must_ execute atomically wrt each other.
17 */
18static inline void wake_one_more(struct semaphore * sem)
19{
20 atomic_inc((atomic_t *)&sem->sleepers);
21}
22
23static inline int waking_non_zero(struct semaphore *sem)
24{
25 int ret;
26 unsigned long flags;
27
28 spin_lock_irqsave(&semaphore_wake_lock, flags);
29 ret = 0;
30 if (sem->sleepers > 0) {
31 sem->sleepers--;
32 ret = 1;
33 }
34 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
35 return ret;
36}
37
38/*
39 * waking_non_zero_interruptible:
40 * 1 got the lock
41 * 0 go to sleep
42 * -EINTR interrupted
43 */
44static inline int waking_non_zero_interruptible(struct semaphore *sem,
45 struct task_struct *tsk)
46{
47 int ret;
48 unsigned long flags;
49
50 spin_lock_irqsave(&semaphore_wake_lock, flags);
51 ret = 0;
52 if (sem->sleepers > 0) {
53 sem->sleepers--;
54 ret = 1;
55 } else if (signal_pending(tsk)) {
56 atomic_inc(&sem->count);
57 ret = -EINTR;
58 }
59 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
60 return ret;
61}
62
63/*
64 * waking_non_zero_trylock:
65 * 1 failed to lock
66 * 0 got the lock
67 */
68static inline int waking_non_zero_trylock(struct semaphore *sem)
69{
70 int ret;
71 unsigned long flags;
72
73 spin_lock_irqsave(&semaphore_wake_lock, flags);
74 ret = 1;
75 if (sem->sleepers <= 0)
76 atomic_inc(&sem->count);
77 else {
78 sem->sleepers--;
79 ret = 0;
80 }
81 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
82 return ret;
83}
84
85#endif
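
The helper trio just deleted implements a wakeup handshake: up() banks a credit with wake_one_more(), and each task woken inside __down() tries to withdraw one with waking_non_zero(), so a spurious wakeup simply goes back to sleep. A self-contained model of the two sides follows; it is simplified in that the real wake_one_more() is a lock-free atomic_inc, which is why the comment above insists the pair execute atomically with respect to each other:

#include <pthread.h>

static pthread_mutex_t wake_lock = PTHREAD_MUTEX_INITIALIZER;
static int credits;     /* the 'sleepers' count banked by up() */

static void model_wake_one_more(void)
{
        pthread_mutex_lock(&wake_lock);
        credits++;                      /* deposit one wakeup credit */
        pthread_mutex_unlock(&wake_lock);
}

static int model_waking_non_zero(void)
{
        int got = 0;

        pthread_mutex_lock(&wake_lock);
        if (credits > 0) {              /* withdraw a credit if any  */
                credits--;
                got = 1;                /* caller owns the semaphore */
        }
        pthread_mutex_unlock(&wake_lock);
        return got;                     /* 0 means: go back to sleep */
}
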
diff --git a/include/asm-h8300/semaphore.h b/include/asm-h8300/semaphore.h
index f3ffff83ff09..d9b2034ed1d2 100644
--- a/include/asm-h8300/semaphore.h
+++ b/include/asm-h8300/semaphore.h
@@ -1,190 +1 @@
+#include <linux/semaphore.h>
1#ifndef _H8300_SEMAPHORE_H
2#define _H8300_SEMAPHORE_H
3
4#define RW_LOCK_BIAS 0x01000000
5
6#ifndef __ASSEMBLY__
7
8#include <linux/linkage.h>
9#include <linux/wait.h>
10#include <linux/spinlock.h>
11#include <linux/rwsem.h>
12
13#include <asm/system.h>
14#include <asm/atomic.h>
15
16/*
17 * Interrupt-safe semaphores..
18 *
19 * (C) Copyright 1996 Linus Torvalds
20 *
21 * H8/300 version by Yoshinori Sato
22 */
23
24
25struct semaphore {
26 atomic_t count;
27 int sleepers;
28 wait_queue_head_t wait;
29};
30
31#define __SEMAPHORE_INITIALIZER(name, n) \
32{ \
33 .count = ATOMIC_INIT(n), \
34 .sleepers = 0, \
35 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
36}
37
38#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
39 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
40
41#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
42
43static inline void sema_init (struct semaphore *sem, int val)
44{
45 *sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
46}
47
48static inline void init_MUTEX (struct semaphore *sem)
49{
50 sema_init(sem, 1);
51}
52
53static inline void init_MUTEX_LOCKED (struct semaphore *sem)
54{
55 sema_init(sem, 0);
56}
57
58asmlinkage void __down_failed(void /* special register calling convention */);
59asmlinkage int __down_failed_interruptible(void /* params in registers */);
60asmlinkage int __down_failed_trylock(void /* params in registers */);
61asmlinkage void __up_wakeup(void /* special register calling convention */);
62
63asmlinkage void __down(struct semaphore * sem);
64asmlinkage int __down_interruptible(struct semaphore * sem);
65asmlinkage int __down_trylock(struct semaphore * sem);
66asmlinkage void __up(struct semaphore * sem);
67
68extern spinlock_t semaphore_wake_lock;
69
70/*
71 * This is ugly, but we want the default case to fall through.
72 * "down_failed" is a special asm handler that calls the C
73 * routine that actually waits. See arch/m68k/lib/semaphore.S
74 */
75static inline void down(struct semaphore * sem)
76{
77 register atomic_t *count asm("er0");
78
79 might_sleep();
80
81 count = &(sem->count);
82 __asm__ __volatile__(
83 "stc ccr,r3l\n\t"
84 "orc #0x80,ccr\n\t"
85 "mov.l %2, er1\n\t"
86 "dec.l #1,er1\n\t"
87 "mov.l er1,%0\n\t"
88 "bpl 1f\n\t"
89 "ldc r3l,ccr\n\t"
90 "mov.l %1,er0\n\t"
91 "jsr @___down\n\t"
92 "bra 2f\n"
93 "1:\n\t"
94 "ldc r3l,ccr\n"
95 "2:"
96 : "=m"(*count)
97 : "g"(sem),"m"(*count)
98 : "cc", "er1", "er2", "er3");
99}
100
101static inline int down_interruptible(struct semaphore * sem)
102{
103 register atomic_t *count asm("er0");
104
105 might_sleep();
106
107 count = &(sem->count);
108 __asm__ __volatile__(
109 "stc ccr,r1l\n\t"
110 "orc #0x80,ccr\n\t"
111 "mov.l %3, er2\n\t"
112 "dec.l #1,er2\n\t"
113 "mov.l er2,%1\n\t"
114 "bpl 1f\n\t"
115 "ldc r1l,ccr\n\t"
116 "mov.l %2,er0\n\t"
117 "jsr @___down_interruptible\n\t"
118 "bra 2f\n"
119 "1:\n\t"
120 "ldc r1l,ccr\n\t"
121 "sub.l %0,%0\n\t"
122 "2:\n\t"
123 : "=r" (count),"=m" (*count)
124 : "g"(sem),"m"(*count)
125 : "cc", "er1", "er2", "er3");
126 return (int)count;
127}
128
129static inline int down_trylock(struct semaphore * sem)
130{
131 register atomic_t *count asm("er0");
132
133 count = &(sem->count);
134 __asm__ __volatile__(
135 "stc ccr,r3l\n\t"
136 "orc #0x80,ccr\n\t"
137 "mov.l %3,er2\n\t"
138 "dec.l #1,er2\n\t"
139 "mov.l er2,%0\n\t"
140 "bpl 1f\n\t"
141 "ldc r3l,ccr\n\t"
142 "jmp @3f\n\t"
143 LOCK_SECTION_START(".align 2\n\t")
144 "3:\n\t"
145 "mov.l %2,er0\n\t"
146 "jsr @___down_trylock\n\t"
147 "jmp @2f\n\t"
148 LOCK_SECTION_END
149 "1:\n\t"
150 "ldc r3l,ccr\n\t"
151 "sub.l %1,%1\n"
152 "2:"
153 : "=m" (*count),"=r"(count)
154 : "g"(sem),"m"(*count)
155 : "cc", "er1","er2", "er3");
156 return (int)count;
157}
158
159/*
160 * Note! This is subtle. We jump to wake people up only if
161 * the semaphore was negative (== somebody was waiting on it).
162 * The default case (no contention) will result in NO
163 * jumps for both down() and up().
164 */
165static inline void up(struct semaphore * sem)
166{
167 register atomic_t *count asm("er0");
168
169 count = &(sem->count);
170 __asm__ __volatile__(
171 "stc ccr,r3l\n\t"
172 "orc #0x80,ccr\n\t"
173 "mov.l %2,er1\n\t"
174 "inc.l #1,er1\n\t"
175 "mov.l er1,%0\n\t"
176 "ldc r3l,ccr\n\t"
177 "sub.l er2,er2\n\t"
178 "cmp.l er2,er1\n\t"
179 "bgt 1f\n\t"
180 "mov.l %1,er0\n\t"
181 "jsr @___up\n"
182 "1:"
183 : "=m"(*count)
184 : "g"(sem),"m"(*count)
185 : "cc", "er1", "er2", "er3");
186}
187
188#endif /* __ASSEMBLY__ */
189
190#endif
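
Every one of these headers exported the same declaration and initialization API, which is what lets a single generic header replace them all. A typical call-side idiom of the era, shown as an illustrative driver snippet (the function and variable names are invented, not part of this patch):

static DECLARE_MUTEX(dev_sem);          /* count starts at 1: a mutex */

int dev_op(void)
{
        if (down_interruptible(&dev_sem))
                return -EINTR;          /* a signal arrived while sleeping */
        /* ... exclusive access to the device state ... */
        up(&dev_sem);
        return 0;
}
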
diff --git a/include/asm-ia64/semaphore.h b/include/asm-ia64/semaphore.h
index d8393d11288d..d9b2034ed1d2 100644
--- a/include/asm-ia64/semaphore.h
+++ b/include/asm-ia64/semaphore.h
@@ -1,99 +1 @@
+#include <linux/semaphore.h>
1#ifndef _ASM_IA64_SEMAPHORE_H
2#define _ASM_IA64_SEMAPHORE_H
3
4/*
5 * Copyright (C) 1998-2000 Hewlett-Packard Co
6 * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
7 */
8
9#include <linux/wait.h>
10#include <linux/rwsem.h>
11
12#include <asm/atomic.h>
13
14struct semaphore {
15 atomic_t count;
16 int sleepers;
17 wait_queue_head_t wait;
18};
19
20#define __SEMAPHORE_INITIALIZER(name, n) \
21{ \
22 .count = ATOMIC_INIT(n), \
23 .sleepers = 0, \
24 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
25}
26
27#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
28 struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
29
30#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
31
32static inline void
33sema_init (struct semaphore *sem, int val)
34{
35 *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
36}
37
38static inline void
39init_MUTEX (struct semaphore *sem)
40{
41 sema_init(sem, 1);
42}
43
44static inline void
45init_MUTEX_LOCKED (struct semaphore *sem)
46{
47 sema_init(sem, 0);
48}
49
50extern void __down (struct semaphore * sem);
51extern int __down_interruptible (struct semaphore * sem);
52extern int __down_trylock (struct semaphore * sem);
53extern void __up (struct semaphore * sem);
54
55/*
56 * Atomically decrement the semaphore's count. If it goes negative,
57 * block the calling thread in the TASK_UNINTERRUPTIBLE state.
58 */
59static inline void
60down (struct semaphore *sem)
61{
62 might_sleep();
63 if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
64 __down(sem);
65}
66
67/*
68 * Atomically decrement the semaphore's count. If it goes negative,
69 * block the calling thread in the TASK_INTERRUPTIBLE state.
70 */
71static inline int
72down_interruptible (struct semaphore * sem)
73{
74 int ret = 0;
75
76 might_sleep();
77 if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
78 ret = __down_interruptible(sem);
79 return ret;
80}
81
82static inline int
83down_trylock (struct semaphore *sem)
84{
85 int ret = 0;
86
87 if (ia64_fetchadd(-1, &sem->count.counter, acq) < 1)
88 ret = __down_trylock(sem);
89 return ret;
90}
91
92static inline void
93up (struct semaphore * sem)
94{
95 if (ia64_fetchadd(1, &sem->count.counter, rel) <= -1)
96 __up(sem);
97}
98
99#endif /* _ASM_IA64_SEMAPHORE_H */
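
ia64 is the one port that spells its memory ordering out: the fetchadd that takes the semaphore uses acquire semantics (acq) and the one that releases it uses release semantics (rel), so nothing done while holding the semaphore can leak out of the critical section. The same fast paths rendered in C11 atomics (a sketch; the model_* names are mine):

#include <stdatomic.h>
#include <stdbool.h>

/* Returns true when the slow path (__down) must run. */
static inline bool model_down_needs_wait(atomic_int *count)
{
        /* fetch_sub returns the old value; old < 1 means the count
         * went non-positive, exactly like ia64_fetchadd(-1, .., acq). */
        return atomic_fetch_sub_explicit(count, 1,
                                         memory_order_acquire) < 1;
}

/* Returns true when a waiter must be woken (__up). */
static inline bool model_up_needs_wake(atomic_int *count)
{
        /* old <= -1 means somebody was recorded as sleeping. */
        return atomic_fetch_add_explicit(count, 1,
                                         memory_order_release) <= -1;
}
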
diff --git a/include/asm-m32r/semaphore.h b/include/asm-m32r/semaphore.h
index b5bf95a6f2b4..d9b2034ed1d2 100644
--- a/include/asm-m32r/semaphore.h
+++ b/include/asm-m32r/semaphore.h
@@ -1,144 +1 @@
+#include <linux/semaphore.h>
1#ifndef _ASM_M32R_SEMAPHORE_H
2#define _ASM_M32R_SEMAPHORE_H
3
4#include <linux/linkage.h>
5
6#ifdef __KERNEL__
7
8/*
9 * SMP- and interrupt-safe semaphores..
10 *
11 * Copyright (C) 1996 Linus Torvalds
12 * Copyright (C) 2004, 2006 Hirokazu Takata <takata at linux-m32r.org>
13 */
14
15#include <linux/wait.h>
16#include <linux/rwsem.h>
17#include <asm/assembler.h>
18#include <asm/system.h>
19#include <asm/atomic.h>
20
21struct semaphore {
22 atomic_t count;
23 int sleepers;
24 wait_queue_head_t wait;
25};
26
27#define __SEMAPHORE_INITIALIZER(name, n) \
28{ \
29 .count = ATOMIC_INIT(n), \
30 .sleepers = 0, \
31 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
32}
33
34#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
35 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
36
37#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
38
39static inline void sema_init (struct semaphore *sem, int val)
40{
41/*
42 * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
43 *
44 * i'd rather use the more flexible initialization above, but sadly
45 * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
46 */
47 atomic_set(&sem->count, val);
48 sem->sleepers = 0;
49 init_waitqueue_head(&sem->wait);
50}
51
52static inline void init_MUTEX (struct semaphore *sem)
53{
54 sema_init(sem, 1);
55}
56
57static inline void init_MUTEX_LOCKED (struct semaphore *sem)
58{
59 sema_init(sem, 0);
60}
61
62asmlinkage void __down_failed(void /* special register calling convention */);
63asmlinkage int __down_failed_interruptible(void /* params in registers */);
64asmlinkage int __down_failed_trylock(void /* params in registers */);
65asmlinkage void __up_wakeup(void /* special register calling convention */);
66
67asmlinkage void __down(struct semaphore * sem);
68asmlinkage int __down_interruptible(struct semaphore * sem);
69asmlinkage int __down_trylock(struct semaphore * sem);
70asmlinkage void __up(struct semaphore * sem);
71
72/*
73 * Atomically decrement the semaphore's count. If it goes negative,
74 * block the calling thread in the TASK_UNINTERRUPTIBLE state.
75 */
76static inline void down(struct semaphore * sem)
77{
78 might_sleep();
79 if (unlikely(atomic_dec_return(&sem->count) < 0))
80 __down(sem);
81}
82
83/*
84 * Interruptible try to acquire a semaphore. If we obtained
85 * it, return zero. If we were interrupted, returns -EINTR
86 */
87static inline int down_interruptible(struct semaphore * sem)
88{
89 int result = 0;
90
91 might_sleep();
92 if (unlikely(atomic_dec_return(&sem->count) < 0))
93 result = __down_interruptible(sem);
94
95 return result;
96}
97
98/*
99 * Non-blockingly attempt to down() a semaphore.
100 * Returns zero if we acquired it
101 */
102static inline int down_trylock(struct semaphore * sem)
103{
104 unsigned long flags;
105 long count;
106 int result = 0;
107
108 local_irq_save(flags);
109 __asm__ __volatile__ (
110 "# down_trylock \n\t"
111 DCACHE_CLEAR("%0", "r4", "%1")
112 M32R_LOCK" %0, @%1; \n\t"
113 "addi %0, #-1; \n\t"
114 M32R_UNLOCK" %0, @%1; \n\t"
115 : "=&r" (count)
116 : "r" (&sem->count)
117 : "memory"
118#ifdef CONFIG_CHIP_M32700_TS1
119 , "r4"
120#endif /* CONFIG_CHIP_M32700_TS1 */
121 );
122 local_irq_restore(flags);
123
124 if (unlikely(count < 0))
125 result = __down_trylock(sem);
126
127 return result;
128}
129
130/*
131 * Note! This is subtle. We jump to wake people up only if
132 * the semaphore was negative (== somebody was waiting on it).
133 * The default case (no contention) will result in NO
134 * jumps for both down() and up().
135 */
136static inline void up(struct semaphore * sem)
137{
138 if (unlikely(atomic_inc_return(&sem->count) <= 0))
139 __up(sem);
140}
141
142#endif /* __KERNEL__ */
143
144#endif /* _ASM_M32R_SEMAPHORE_H */
diff --git a/include/asm-m68k/semaphore-helper.h b/include/asm-m68k/semaphore-helper.h
deleted file mode 100644
index eef30ba0b499..000000000000
--- a/include/asm-m68k/semaphore-helper.h
+++ /dev/null
@@ -1,142 +0,0 @@
1#ifndef _M68K_SEMAPHORE_HELPER_H
2#define _M68K_SEMAPHORE_HELPER_H
3
4/*
5 * SMP- and interrupt-safe semaphores helper functions.
6 *
7 * (C) Copyright 1996 Linus Torvalds
8 *
9 * m68k version by Andreas Schwab
10 */
11
12#include <linux/errno.h>
13
14/*
15 * These two _must_ execute atomically wrt each other.
16 */
17static inline void wake_one_more(struct semaphore * sem)
18{
19 atomic_inc(&sem->waking);
20}
21
22#ifndef CONFIG_RMW_INSNS
23extern spinlock_t semaphore_wake_lock;
24#endif
25
26static inline int waking_non_zero(struct semaphore *sem)
27{
28 int ret;
29#ifndef CONFIG_RMW_INSNS
30 unsigned long flags;
31
32 spin_lock_irqsave(&semaphore_wake_lock, flags);
33 ret = 0;
34 if (atomic_read(&sem->waking) > 0) {
35 atomic_dec(&sem->waking);
36 ret = 1;
37 }
38 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
39#else
40 int tmp1, tmp2;
41
42 __asm__ __volatile__
43 ("1: movel %1,%2\n"
44 " jle 2f\n"
45 " subql #1,%2\n"
46 " casl %1,%2,%3\n"
47 " jne 1b\n"
48 " moveq #1,%0\n"
49 "2:"
50 : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
51 : "m" (sem->waking), "0" (0), "1" (sem->waking));
52#endif
53
54 return ret;
55}
56
57/*
58 * waking_non_zero_interruptible:
59 * 1 got the lock
60 * 0 go to sleep
61 * -EINTR interrupted
62 */
63static inline int waking_non_zero_interruptible(struct semaphore *sem,
64 struct task_struct *tsk)
65{
66 int ret;
67#ifndef CONFIG_RMW_INSNS
68 unsigned long flags;
69
70 spin_lock_irqsave(&semaphore_wake_lock, flags);
71 ret = 0;
72 if (atomic_read(&sem->waking) > 0) {
73 atomic_dec(&sem->waking);
74 ret = 1;
75 } else if (signal_pending(tsk)) {
76 atomic_inc(&sem->count);
77 ret = -EINTR;
78 }
79 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
80#else
81 int tmp1, tmp2;
82
83 __asm__ __volatile__
84 ("1: movel %1,%2\n"
85 " jle 2f\n"
86 " subql #1,%2\n"
87 " casl %1,%2,%3\n"
88 " jne 1b\n"
89 " moveq #1,%0\n"
90 " jra %a4\n"
91 "2:"
92 : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
93 : "m" (sem->waking), "i" (&&next), "0" (0), "1" (sem->waking));
94 if (signal_pending(tsk)) {
95 atomic_inc(&sem->count);
96 ret = -EINTR;
97 }
98next:
99#endif
100
101 return ret;
102}
103
104/*
105 * waking_non_zero_trylock:
106 * 1 failed to lock
107 * 0 got the lock
108 */
109static inline int waking_non_zero_trylock(struct semaphore *sem)
110{
111 int ret;
112#ifndef CONFIG_RMW_INSNS
113 unsigned long flags;
114
115 spin_lock_irqsave(&semaphore_wake_lock, flags);
116 ret = 1;
117 if (atomic_read(&sem->waking) > 0) {
118 atomic_dec(&sem->waking);
119 ret = 0;
120 } else
121 atomic_inc(&sem->count);
122 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
123#else
124 int tmp1, tmp2;
125
126 __asm__ __volatile__
127 ("1: movel %1,%2\n"
128 " jle 2f\n"
129 " subql #1,%2\n"
130 " casl %1,%2,%3\n"
131 " jne 1b\n"
132 " moveq #0,%0\n"
133 "2:"
134 : "=d" (ret), "=d" (tmp1), "=d" (tmp2)
135 : "m" (sem->waking), "0" (1), "1" (sem->waking));
136 if (ret)
137 atomic_inc(&sem->count);
138#endif
139 return ret;
140}
141
142#endif
diff --git a/include/asm-m68k/semaphore.h b/include/asm-m68k/semaphore.h
index 64d6b119bb0a..d9b2034ed1d2 100644
--- a/include/asm-m68k/semaphore.h
+++ b/include/asm-m68k/semaphore.h
@@ -1,163 +1 @@
+#include <linux/semaphore.h>
1#ifndef _M68K_SEMAPHORE_H
2#define _M68K_SEMAPHORE_H
3
4#define RW_LOCK_BIAS 0x01000000
5
6#ifndef __ASSEMBLY__
7
8#include <linux/linkage.h>
9#include <linux/wait.h>
10#include <linux/spinlock.h>
11#include <linux/rwsem.h>
12#include <linux/stringify.h>
13
14#include <asm/system.h>
15#include <asm/atomic.h>
16
17/*
18 * Interrupt-safe semaphores..
19 *
20 * (C) Copyright 1996 Linus Torvalds
21 *
22 * m68k version by Andreas Schwab
23 */
24
25
26struct semaphore {
27 atomic_t count;
28 atomic_t waking;
29 wait_queue_head_t wait;
30};
31
32#define __SEMAPHORE_INITIALIZER(name, n) \
33{ \
34 .count = ATOMIC_INIT(n), \
35 .waking = ATOMIC_INIT(0), \
36 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
37}
38
39#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
40 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
41
42#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
43
44static inline void sema_init(struct semaphore *sem, int val)
45{
46 *sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
47}
48
49static inline void init_MUTEX (struct semaphore *sem)
50{
51 sema_init(sem, 1);
52}
53
54static inline void init_MUTEX_LOCKED (struct semaphore *sem)
55{
56 sema_init(sem, 0);
57}
58
59asmlinkage void __down_failed(void /* special register calling convention */);
60asmlinkage int __down_failed_interruptible(void /* params in registers */);
61asmlinkage int __down_failed_trylock(void /* params in registers */);
62asmlinkage void __up_wakeup(void /* special register calling convention */);
63
64asmlinkage void __down(struct semaphore * sem);
65asmlinkage int __down_interruptible(struct semaphore * sem);
66asmlinkage int __down_trylock(struct semaphore * sem);
67asmlinkage void __up(struct semaphore * sem);
68
69/*
70 * This is ugly, but we want the default case to fall through.
71 * "down_failed" is a special asm handler that calls the C
72 * routine that actually waits. See arch/m68k/lib/semaphore.S
73 */
74static inline void down(struct semaphore *sem)
75{
76 register struct semaphore *sem1 __asm__ ("%a1") = sem;
77
78 might_sleep();
79 __asm__ __volatile__(
80 "| atomic down operation\n\t"
81 "subql #1,%0@\n\t"
82 "jmi 2f\n\t"
83 "1:\n"
84 LOCK_SECTION_START(".even\n\t")
85 "2:\tpea 1b\n\t"
86 "jbra __down_failed\n"
87 LOCK_SECTION_END
88 : /* no outputs */
89 : "a" (sem1)
90 : "memory");
91}
92
93static inline int down_interruptible(struct semaphore *sem)
94{
95 register struct semaphore *sem1 __asm__ ("%a1") = sem;
96 register int result __asm__ ("%d0");
97
98 might_sleep();
99 __asm__ __volatile__(
100 "| atomic interruptible down operation\n\t"
101 "subql #1,%1@\n\t"
102 "jmi 2f\n\t"
103 "clrl %0\n"
104 "1:\n"
105 LOCK_SECTION_START(".even\n\t")
106 "2:\tpea 1b\n\t"
107 "jbra __down_failed_interruptible\n"
108 LOCK_SECTION_END
109 : "=d" (result)
110 : "a" (sem1)
111 : "memory");
112 return result;
113}
114
115static inline int down_trylock(struct semaphore *sem)
116{
117 register struct semaphore *sem1 __asm__ ("%a1") = sem;
118 register int result __asm__ ("%d0");
119
120 __asm__ __volatile__(
121 "| atomic down trylock operation\n\t"
122 "subql #1,%1@\n\t"
123 "jmi 2f\n\t"
124 "clrl %0\n"
125 "1:\n"
126 LOCK_SECTION_START(".even\n\t")
127 "2:\tpea 1b\n\t"
128 "jbra __down_failed_trylock\n"
129 LOCK_SECTION_END
130 : "=d" (result)
131 : "a" (sem1)
132 : "memory");
133 return result;
134}
135
136/*
137 * Note! This is subtle. We jump to wake people up only if
138 * the semaphore was negative (== somebody was waiting on it).
139 * The default case (no contention) will result in NO
140 * jumps for both down() and up().
141 */
142static inline void up(struct semaphore *sem)
143{
144 register struct semaphore *sem1 __asm__ ("%a1") = sem;
145
146 __asm__ __volatile__(
147 "| atomic up operation\n\t"
148 "addql #1,%0@\n\t"
149 "jle 2f\n"
150 "1:\n"
151 LOCK_SECTION_START(".even\n\t")
152 "2:\t"
153 "pea 1b\n\t"
154 "jbra __up_wakeup\n"
155 LOCK_SECTION_END
156 : /* no outputs */
157 : "a" (sem1)
158 : "memory");
159}
160
161#endif /* __ASSEMBLY__ */
162
163#endif
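
The inline assembly above exists for one reason: it keeps the uncontended path branch-free by parking the failure handler in an out-of-line section (LOCK_SECTION_START), which pushes a return address with pea and jumps to __down_failed only when the decrement goes negative. The same fast-path/slow-path shape in portable GCC-style C (a sketch, not the m68k code):

#include <stdatomic.h>

__attribute__((noinline))
static void slow_down(atomic_int *count)
{
        (void)count;    /* stand-in for __down_failed -> __down() */
}

static inline void model_down(atomic_int *count)
{
        /* old <= 0 is the 'jmi' case: the decrement went negative,
         * so take the rarely-executed out-of-line path. */
        if (__builtin_expect(atomic_fetch_sub(count, 1) <= 0, 0))
                slow_down(count);
}
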
diff --git a/include/asm-m68knommu/semaphore-helper.h b/include/asm-m68knommu/semaphore-helper.h
deleted file mode 100644
index 43da7bc483c7..000000000000
--- a/include/asm-m68knommu/semaphore-helper.h
+++ /dev/null
@@ -1,82 +0,0 @@
1#ifndef _M68K_SEMAPHORE_HELPER_H
2#define _M68K_SEMAPHORE_HELPER_H
3
4/*
5 * SMP- and interrupt-safe semaphores helper functions.
6 *
7 * (C) Copyright 1996 Linus Torvalds
8 *
9 * m68k version by Andreas Schwab
10 */
11
12
13/*
14 * These two _must_ execute atomically wrt each other.
15 */
16static inline void wake_one_more(struct semaphore * sem)
17{
18 atomic_inc(&sem->waking);
19}
20
21static inline int waking_non_zero(struct semaphore *sem)
22{
23 int ret;
24 unsigned long flags;
25
26 spin_lock_irqsave(&semaphore_wake_lock, flags);
27 ret = 0;
28 if (atomic_read(&sem->waking) > 0) {
29 atomic_dec(&sem->waking);
30 ret = 1;
31 }
32 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
33 return ret;
34}
35
36/*
37 * waking_non_zero_interruptible:
38 * 1 got the lock
39 * 0 go to sleep
40 * -EINTR interrupted
41 */
42static inline int waking_non_zero_interruptible(struct semaphore *sem,
43 struct task_struct *tsk)
44{
45 int ret;
46 unsigned long flags;
47
48 spin_lock_irqsave(&semaphore_wake_lock, flags);
49 ret = 0;
50 if (atomic_read(&sem->waking) > 0) {
51 atomic_dec(&sem->waking);
52 ret = 1;
53 } else if (signal_pending(tsk)) {
54 atomic_inc(&sem->count);
55 ret = -EINTR;
56 }
57 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
58 return ret;
59}
60
61/*
62 * waking_non_zero_trylock:
63 * 1 failed to lock
64 * 0 got the lock
65 */
66static inline int waking_non_zero_trylock(struct semaphore *sem)
67{
68 int ret;
69 unsigned long flags;
70
71 spin_lock_irqsave(&semaphore_wake_lock, flags);
72 ret = 1;
73 if (atomic_read(&sem->waking) > 0) {
74 atomic_dec(&sem->waking);
75 ret = 0;
76 } else
77 atomic_inc(&sem->count);
78 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
79 return ret;
80}
81
82#endif
diff --git a/include/asm-m68knommu/semaphore.h b/include/asm-m68knommu/semaphore.h
index 5779eb6c0689..d9b2034ed1d2 100644
--- a/include/asm-m68knommu/semaphore.h
+++ b/include/asm-m68knommu/semaphore.h
@@ -1,153 +1 @@
+#include <linux/semaphore.h>
1#ifndef _M68K_SEMAPHORE_H
2#define _M68K_SEMAPHORE_H
3
4#define RW_LOCK_BIAS 0x01000000
5
6#ifndef __ASSEMBLY__
7
8#include <linux/linkage.h>
9#include <linux/wait.h>
10#include <linux/spinlock.h>
11#include <linux/rwsem.h>
12
13#include <asm/system.h>
14#include <asm/atomic.h>
15
16/*
17 * Interrupt-safe semaphores..
18 *
19 * (C) Copyright 1996 Linus Torvalds
20 *
21 * m68k version by Andreas Schwab
22 */
23
24
25struct semaphore {
26 atomic_t count;
27 atomic_t waking;
28 wait_queue_head_t wait;
29};
30
31#define __SEMAPHORE_INITIALIZER(name, n) \
32{ \
33 .count = ATOMIC_INIT(n), \
34 .waking = ATOMIC_INIT(0), \
35 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
36}
37
38#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
39 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
40
41#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
42
43static inline void sema_init (struct semaphore *sem, int val)
44{
45 *sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
46}
47
48static inline void init_MUTEX (struct semaphore *sem)
49{
50 sema_init(sem, 1);
51}
52
53static inline void init_MUTEX_LOCKED (struct semaphore *sem)
54{
55 sema_init(sem, 0);
56}
57
58asmlinkage void __down_failed(void /* special register calling convention */);
59asmlinkage int __down_failed_interruptible(void /* params in registers */);
60asmlinkage int __down_failed_trylock(void /* params in registers */);
61asmlinkage void __up_wakeup(void /* special register calling convention */);
62
63asmlinkage void __down(struct semaphore * sem);
64asmlinkage int __down_interruptible(struct semaphore * sem);
65asmlinkage int __down_trylock(struct semaphore * sem);
66asmlinkage void __up(struct semaphore * sem);
67
68extern spinlock_t semaphore_wake_lock;
69
70/*
71 * This is ugly, but we want the default case to fall through.
72 * "down_failed" is a special asm handler that calls the C
73 * routine that actually waits. See arch/m68k/lib/semaphore.S
74 */
75static inline void down(struct semaphore * sem)
76{
77 might_sleep();
78 __asm__ __volatile__(
79 "| atomic down operation\n\t"
80 "movel %0, %%a1\n\t"
81 "lea %%pc@(1f), %%a0\n\t"
82 "subql #1, %%a1@\n\t"
83 "jmi __down_failed\n"
84 "1:"
85 : /* no outputs */
86 : "g" (sem)
87 : "cc", "%a0", "%a1", "memory");
88}
89
90static inline int down_interruptible(struct semaphore * sem)
91{
92 int ret;
93
94 might_sleep();
95 __asm__ __volatile__(
96 "| atomic down operation\n\t"
97 "movel %1, %%a1\n\t"
98 "lea %%pc@(1f), %%a0\n\t"
99 "subql #1, %%a1@\n\t"
100 "jmi __down_failed_interruptible\n\t"
101 "clrl %%d0\n"
102 "1: movel %%d0, %0\n"
103 : "=d" (ret)
104 : "g" (sem)
105 : "cc", "%d0", "%a0", "%a1", "memory");
106 return(ret);
107}
108
109static inline int down_trylock(struct semaphore * sem)
110{
111 register struct semaphore *sem1 __asm__ ("%a1") = sem;
112 register int result __asm__ ("%d0");
113
114 __asm__ __volatile__(
115 "| atomic down trylock operation\n\t"
116 "subql #1,%1@\n\t"
117 "jmi 2f\n\t"
118 "clrl %0\n"
119 "1:\n"
120 ".section .text.lock,\"ax\"\n"
121 ".even\n"
122 "2:\tpea 1b\n\t"
123 "jbra __down_failed_trylock\n"
124 ".previous"
125 : "=d" (result)
126 : "a" (sem1)
127 : "memory");
128 return result;
129}
130
131/*
132 * Note! This is subtle. We jump to wake people up only if
133 * the semaphore was negative (== somebody was waiting on it).
134 * The default case (no contention) will result in NO
135 * jumps for both down() and up().
136 */
137static inline void up(struct semaphore * sem)
138{
139 __asm__ __volatile__(
140 "| atomic up operation\n\t"
141 "movel %0, %%a1\n\t"
142 "lea %%pc@(1f), %%a0\n\t"
143 "addql #1, %%a1@\n\t"
144 "jle __up_wakeup\n"
145 "1:"
146 : /* no outputs */
147 : "g" (sem)
148 : "cc", "%a0", "%a1", "memory");
149}
150
151#endif /* __ASSEMBLY__ */
152
153#endif
diff --git a/include/asm-mips/semaphore.h b/include/asm-mips/semaphore.h
index fdf8042b784b..d9b2034ed1d2 100644
--- a/include/asm-mips/semaphore.h
+++ b/include/asm-mips/semaphore.h
@@ -1,108 +1 @@
+#include <linux/semaphore.h>
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1996 Linus Torvalds
7 * Copyright (C) 1998, 99, 2000, 01, 04 Ralf Baechle
8 * Copyright (C) 1999, 2000, 01 Silicon Graphics, Inc.
9 * Copyright (C) 2000, 01 MIPS Technologies, Inc.
10 *
11 * In all honesty, little of the old MIPS code is left - the PPC64 variant was
12 * just looking nice and portable so I ripped it. Credits to whoever wrote
13 * it.
14 */
15#ifndef __ASM_SEMAPHORE_H
16#define __ASM_SEMAPHORE_H
17
18/*
19 * Remove spinlock-based RW semaphores; RW semaphore definitions are
20 * now in rwsem.h and we use the generic lib/rwsem.c implementation.
21 * Rework semaphores to use atomic_dec_if_positive.
22 * -- Paul Mackerras (paulus@samba.org)
23 */
24
25#ifdef __KERNEL__
26
27#include <asm/atomic.h>
28#include <asm/system.h>
29#include <linux/wait.h>
30#include <linux/rwsem.h>
31
32struct semaphore {
33 /*
34 * Note that any negative value of count is equivalent to 0,
35 * but additionally indicates that some process(es) might be
36 * sleeping on `wait'.
37 */
38 atomic_t count;
39 wait_queue_head_t wait;
40};
41
42#define __SEMAPHORE_INITIALIZER(name, n) \
43{ \
44 .count = ATOMIC_INIT(n), \
45 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
46}
47
48#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
49 struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
50
51#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
52
53static inline void sema_init(struct semaphore *sem, int val)
54{
55 atomic_set(&sem->count, val);
56 init_waitqueue_head(&sem->wait);
57}
58
59static inline void init_MUTEX(struct semaphore *sem)
60{
61 sema_init(sem, 1);
62}
63
64static inline void init_MUTEX_LOCKED(struct semaphore *sem)
65{
66 sema_init(sem, 0);
67}
68
69extern void __down(struct semaphore * sem);
70extern int __down_interruptible(struct semaphore * sem);
71extern void __up(struct semaphore * sem);
72
73static inline void down(struct semaphore * sem)
74{
75 might_sleep();
76
77 /*
78 * Try to get the semaphore, take the slow path if we fail.
79 */
80 if (unlikely(atomic_dec_return(&sem->count) < 0))
81 __down(sem);
82}
83
84static inline int down_interruptible(struct semaphore * sem)
85{
86 int ret = 0;
87
88 might_sleep();
89
90 if (unlikely(atomic_dec_return(&sem->count) < 0))
91 ret = __down_interruptible(sem);
92 return ret;
93}
94
95static inline int down_trylock(struct semaphore * sem)
96{
97 return atomic_dec_if_positive(&sem->count) < 0;
98}
99
100static inline void up(struct semaphore * sem)
101{
102 if (unlikely(atomic_inc_return(&sem->count) <= 0))
103 __up(sem);
104}
105
106#endif /* __KERNEL__ */
107
108#endif /* __ASM_SEMAPHORE_H */
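
down_trylock() here leans on atomic_dec_if_positive(), which consumes a token only when one exists, so a failed trylock never has to repair the count afterwards. Where an architecture lacks such a primitive, the usual rendering is a compare-and-swap loop; in C11 terms (a sketch, not the MIPS implementation):

#include <stdatomic.h>

/* Decrement *v only if the result stays >= 0; return the value the
 * decrement produced (or would have produced), so a negative return
 * means the decrement was refused and *v was left untouched. */
static int model_dec_if_positive(atomic_int *v)
{
        int old = atomic_load(v);

        while (old > 0 &&
               !atomic_compare_exchange_weak(v, &old, old - 1))
                ;       /* CAS failed: 'old' was reloaded, retry */
        return old - 1;
}

down_trylock() then reduces to checking whether the returned value is negative, exactly as in the header above.
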
diff --git a/include/asm-mn10300/semaphore.h b/include/asm-mn10300/semaphore.h
index 5a9e1ad0b253..d9b2034ed1d2 100644
--- a/include/asm-mn10300/semaphore.h
+++ b/include/asm-mn10300/semaphore.h
@@ -1,169 +1 @@
+#include <linux/semaphore.h>
1/* MN10300 Semaphores
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11#ifndef _ASM_SEMAPHORE_H
12#define _ASM_SEMAPHORE_H
13
14#ifndef __ASSEMBLY__
15
16#include <linux/linkage.h>
17#include <linux/wait.h>
18#include <linux/spinlock.h>
19#include <linux/rwsem.h>
20
21#define SEMAPHORE_DEBUG 0
22
23/*
24 * the semaphore definition
25 * - if count is >0 then there are tokens available on the semaphore for down
26 * to collect
27 * - if count is <=0 then there are no spare tokens, and anyone that wants one
28 * must wait
29 * - if wait_list is not empty, then there are processes waiting for the
30 * semaphore
31 */
32struct semaphore {
33 atomic_t count; /* it's not really atomic, it's
34 * just that certain modules
35 * expect to be able to access
36 * it directly */
37 spinlock_t wait_lock;
38 struct list_head wait_list;
39#if SEMAPHORE_DEBUG
40 unsigned __magic;
41#endif
42};
43
44#if SEMAPHORE_DEBUG
45# define __SEM_DEBUG_INIT(name) , (long)&(name).__magic
46#else
47# define __SEM_DEBUG_INIT(name)
48#endif
49
50
51#define __SEMAPHORE_INITIALIZER(name, init_count) \
52{ \
53 .count = ATOMIC_INIT(init_count), \
54 .wait_lock = __SPIN_LOCK_UNLOCKED((name).wait_lock), \
55 .wait_list = LIST_HEAD_INIT((name).wait_list) \
56 __SEM_DEBUG_INIT(name) \
57}
58
59#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
60 struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
61
62#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
63#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name, 0)
64
65static inline void sema_init(struct semaphore *sem, int val)
66{
67 *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
68}
69
70static inline void init_MUTEX(struct semaphore *sem)
71{
72 sema_init(sem, 1);
73}
74
75static inline void init_MUTEX_LOCKED(struct semaphore *sem)
76{
77 sema_init(sem, 0);
78}
79
80extern void __down(struct semaphore *sem, unsigned long flags);
81extern int __down_interruptible(struct semaphore *sem, unsigned long flags);
82extern void __up(struct semaphore *sem);
83
84static inline void down(struct semaphore *sem)
85{
86 unsigned long flags;
87 int count;
88
89#if SEMAPHORE_DEBUG
90 CHECK_MAGIC(sem->__magic);
91#endif
92
93 spin_lock_irqsave(&sem->wait_lock, flags);
94 count = atomic_read(&sem->count);
95 if (likely(count > 0)) {
96 atomic_set(&sem->count, count - 1);
97 spin_unlock_irqrestore(&sem->wait_lock, flags);
98 } else {
99 __down(sem, flags);
100 }
101}
102
103static inline int down_interruptible(struct semaphore *sem)
104{
105 unsigned long flags;
106 int count, ret = 0;
107
108#if SEMAPHORE_DEBUG
109 CHECK_MAGIC(sem->__magic);
110#endif
111
112 spin_lock_irqsave(&sem->wait_lock, flags);
113 count = atomic_read(&sem->count);
114 if (likely(count > 0)) {
115 atomic_set(&sem->count, count - 1);
116 spin_unlock_irqrestore(&sem->wait_lock, flags);
117 } else {
118 ret = __down_interruptible(sem, flags);
119 }
120 return ret;
121}
122
123/*
124 * non-blockingly attempt to down() a semaphore.
125 * - returns zero if we acquired it
126 */
127static inline int down_trylock(struct semaphore *sem)
128{
129 unsigned long flags;
130 int count, success = 0;
131
132#if SEMAPHORE_DEBUG
133 CHECK_MAGIC(sem->__magic);
134#endif
135
136 spin_lock_irqsave(&sem->wait_lock, flags);
137 count = atomic_read(&sem->count);
138 if (likely(count > 0)) {
139 atomic_set(&sem->count, count - 1);
140 success = 1;
141 }
142 spin_unlock_irqrestore(&sem->wait_lock, flags);
143 return !success;
144}
145
146static inline void up(struct semaphore *sem)
147{
148 unsigned long flags;
149
150#if SEMAPHORE_DEBUG
151 CHECK_MAGIC(sem->__magic);
152#endif
153
154 spin_lock_irqsave(&sem->wait_lock, flags);
155 if (!list_empty(&sem->wait_list))
156 __up(sem);
157 else
158 atomic_set(&sem->count, atomic_read(&sem->count) + 1);
159 spin_unlock_irqrestore(&sem->wait_lock, flags);
160}
161
162static inline int sem_getcount(struct semaphore *sem)
163{
164 return atomic_read(&sem->count);
165}
166
167#endif /* __ASSEMBLY__ */
168
169#endif
diff --git a/include/asm-parisc/semaphore-helper.h b/include/asm-parisc/semaphore-helper.h
deleted file mode 100644
index 387f7c1277a2..000000000000
--- a/include/asm-parisc/semaphore-helper.h
+++ /dev/null
@@ -1,89 +0,0 @@
1#ifndef _ASM_PARISC_SEMAPHORE_HELPER_H
2#define _ASM_PARISC_SEMAPHORE_HELPER_H
3
4/*
5 * SMP- and interrupt-safe semaphores helper functions.
6 *
7 * (C) Copyright 1996 Linus Torvalds
8 * (C) Copyright 1999 Andrea Arcangeli
9 */
10
11/*
12 * These two _must_ execute atomically wrt each other.
13 *
14 * This is trivially done with load_locked/store_cond,
15 * which we have. Let the rest of the losers suck eggs.
16 */
17static __inline__ void wake_one_more(struct semaphore * sem)
18{
19 atomic_inc((atomic_t *)&sem->waking);
20}
21
22static __inline__ int waking_non_zero(struct semaphore *sem)
23{
24 unsigned long flags;
25 int ret = 0;
26
27 spin_lock_irqsave(&semaphore_wake_lock, flags);
28 if (sem->waking > 0) {
29 sem->waking--;
30 ret = 1;
31 }
32 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
33 return ret;
34}
35
36/*
37 * waking_non_zero_interruptible:
38 * 1 got the lock
39 * 0 go to sleep
40 * -EINTR interrupted
41 *
42 * We must undo the sem->count decrement from down_interruptible() while we
43 * are protected by the spinlock, so that this atomic_inc() is atomic with
44 * the atomic_read() in wake_one_more(); otherwise we can race. -arca
45 */
46static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
47 struct task_struct *tsk)
48{
49 unsigned long flags;
50 int ret = 0;
51
52 spin_lock_irqsave(&semaphore_wake_lock, flags);
53 if (sem->waking > 0) {
54 sem->waking--;
55 ret = 1;
56 } else if (signal_pending(tsk)) {
57 atomic_inc(&sem->count);
58 ret = -EINTR;
59 }
60 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
61 return ret;
62}
63
64/*
65 * waking_non_zero_trylock:
66 * 1 failed to lock
67 * 0 got the lock
68 *
69 * We must undo the sem->count decrement from down_trylock() while we
70 * are protected by the spinlock, so that this atomic_inc() is atomic with
71 * the atomic_read() in wake_one_more(); otherwise we can race. -arca
72 */
73static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
74{
75 unsigned long flags;
76 int ret = 1;
77
78 spin_lock_irqsave(&semaphore_wake_lock, flags);
79 if (sem->waking <= 0)
80 atomic_inc(&sem->count);
81 else {
82 sem->waking--;
83 ret = 0;
84 }
85 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
86 return ret;
87}
88
89#endif /* _ASM_PARISC_SEMAPHORE_HELPER_H */
diff --git a/include/asm-parisc/semaphore.h b/include/asm-parisc/semaphore.h
index a16271cdc748..d9b2034ed1d2 100644
--- a/include/asm-parisc/semaphore.h
+++ b/include/asm-parisc/semaphore.h
@@ -1,145 +1 @@
+#include <linux/semaphore.h>
1/* SMP- and interrupt-safe semaphores.
2 * PA-RISC version by Matthew Wilcox
3 *
4 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
5 * Copyright (C) 1996 Linus Torvalds
6 * Copyright (C) 1999-2001 Matthew Wilcox < willy at debian d0T org >
7 * Copyright (C) 2000 Grant Grundler < grundler a debian org >
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24#ifndef _ASM_PARISC_SEMAPHORE_H
25#define _ASM_PARISC_SEMAPHORE_H
26
27#include <linux/spinlock.h>
28#include <linux/wait.h>
29#include <linux/rwsem.h>
30
31#include <asm/system.h>
32
33/*
34 * The `count' is initialised to the number of people who are allowed to
35 * take the lock. (Normally we want a mutex, so this is `1'.) If
36 * `count' is positive, the lock can be taken. If it's 0, no-one is
37 * waiting on it. If it's -1, at least one task is waiting.
38 */
39struct semaphore {
40 spinlock_t sentry;
41 int count;
42 wait_queue_head_t wait;
43};
44
45#define __SEMAPHORE_INITIALIZER(name, n) \
46{ \
47 .sentry = SPIN_LOCK_UNLOCKED, \
48 .count = n, \
49 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
50}
51
52#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
53 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
54
55#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
56
57static inline void sema_init (struct semaphore *sem, int val)
58{
59 *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
60}
61
62static inline void init_MUTEX (struct semaphore *sem)
63{
64 sema_init(sem, 1);
65}
66
67static inline void init_MUTEX_LOCKED (struct semaphore *sem)
68{
69 sema_init(sem, 0);
70}
71
72static inline int sem_getcount(struct semaphore *sem)
73{
74 return sem->count;
75}
76
77asmlinkage void __down(struct semaphore * sem);
78asmlinkage int __down_interruptible(struct semaphore * sem);
79asmlinkage void __up(struct semaphore * sem);
80
81/* Semaphores can be `tried' from irq context. So we have to disable
82 * interrupts while we're messing with the semaphore. Sorry.
83 */
84
85static inline void down(struct semaphore * sem)
86{
87 might_sleep();
88 spin_lock_irq(&sem->sentry);
89 if (sem->count > 0) {
90 sem->count--;
91 } else {
92 __down(sem);
93 }
94 spin_unlock_irq(&sem->sentry);
95}
96
97static inline int down_interruptible(struct semaphore * sem)
98{
99 int ret = 0;
100 might_sleep();
101 spin_lock_irq(&sem->sentry);
102 if (sem->count > 0) {
103 sem->count--;
104 } else {
105 ret = __down_interruptible(sem);
106 }
107 spin_unlock_irq(&sem->sentry);
108 return ret;
109}
110
111/*
112 * down_trylock returns 0 on success, 1 if we failed to get the lock.
113 * May not sleep, but must preserve irq state
114 */
115static inline int down_trylock(struct semaphore * sem)
116{
117 unsigned long flags;
118 int count;
119
120 spin_lock_irqsave(&sem->sentry, flags);
121 count = sem->count - 1;
122 if (count >= 0)
123 sem->count = count;
124 spin_unlock_irqrestore(&sem->sentry, flags);
125 return (count < 0);
126}
127
128/*
129 * Note! This is subtle. We jump to wake people up only if
130 * the semaphore was negative (== somebody was waiting on it).
131 */
132static inline void up(struct semaphore * sem)
133{
134 unsigned long flags;
135
136 spin_lock_irqsave(&sem->sentry, flags);
137 if (sem->count < 0) {
138 __up(sem);
139 } else {
140 sem->count++;
141 }
142 spin_unlock_irqrestore(&sem->sentry, flags);
143}
144
145#endif /* _ASM_PARISC_SEMAPHORE_H */
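
Two details are worth pinning down in the parisc version: the count lives under a spinlock with interrupts disabled because, as the comment says, semaphores can be tried from irq context; and down_trylock() follows the inverted convention of returning 0 on success. As seen from a caller (illustrative snippet against this API; the function name and the -EAGAIN policy are mine):

static int try_poke_device(struct semaphore *dev_sem)
{
        if (down_trylock(dev_sem))      /* non-zero: would have slept  */
                return -EAGAIN;         /* let the caller retry later  */
        /* ... short critical section, safe even from irq context ... */
        up(dev_sem);
        return 0;
}
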
diff --git a/include/asm-powerpc/semaphore.h b/include/asm-powerpc/semaphore.h
index 48dd32e07749..d9b2034ed1d2 100644
--- a/include/asm-powerpc/semaphore.h
+++ b/include/asm-powerpc/semaphore.h
@@ -1,94 +1 @@
+#include <linux/semaphore.h>
1#ifndef _ASM_POWERPC_SEMAPHORE_H
2#define _ASM_POWERPC_SEMAPHORE_H
3
4/*
5 * Remove spinlock-based RW semaphores; RW semaphore definitions are
6 * now in rwsem.h and we use the generic lib/rwsem.c implementation.
7 * Rework semaphores to use atomic_dec_if_positive.
8 * -- Paul Mackerras (paulus@samba.org)
9 */
10
11#ifdef __KERNEL__
12
13#include <asm/atomic.h>
14#include <asm/system.h>
15#include <linux/wait.h>
16#include <linux/rwsem.h>
17
18struct semaphore {
19 /*
20 * Note that any negative value of count is equivalent to 0,
21 * but additionally indicates that some process(es) might be
22 * sleeping on `wait'.
23 */
24 atomic_t count;
25 wait_queue_head_t wait;
26};
27
28#define __SEMAPHORE_INITIALIZER(name, n) \
29{ \
30 .count = ATOMIC_INIT(n), \
31 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
32}
33
34#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
35 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
36
37#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
38
39static inline void sema_init (struct semaphore *sem, int val)
40{
41 atomic_set(&sem->count, val);
42 init_waitqueue_head(&sem->wait);
43}
44
45static inline void init_MUTEX (struct semaphore *sem)
46{
47 sema_init(sem, 1);
48}
49
50static inline void init_MUTEX_LOCKED (struct semaphore *sem)
51{
52 sema_init(sem, 0);
53}
54
55extern void __down(struct semaphore * sem);
56extern int __down_interruptible(struct semaphore * sem);
57extern void __up(struct semaphore * sem);
58
59static inline void down(struct semaphore * sem)
60{
61 might_sleep();
62
63 /*
64 * Try to get the semaphore, take the slow path if we fail.
65 */
66 if (unlikely(atomic_dec_return(&sem->count) < 0))
67 __down(sem);
68}
69
70static inline int down_interruptible(struct semaphore * sem)
71{
72 int ret = 0;
73
74 might_sleep();
75
76 if (unlikely(atomic_dec_return(&sem->count) < 0))
77 ret = __down_interruptible(sem);
78 return ret;
79}
80
81static inline int down_trylock(struct semaphore * sem)
82{
83 return atomic_dec_if_positive(&sem->count) < 0;
84}
85
86static inline void up(struct semaphore * sem)
87{
88 if (unlikely(atomic_inc_return(&sem->count) <= 0))
89 __up(sem);
90}
91
92#endif /* __KERNEL__ */
93
94#endif /* _ASM_POWERPC_SEMAPHORE_H */
diff --git a/include/asm-s390/semaphore.h b/include/asm-s390/semaphore.h
index 0e7001ad8392..d9b2034ed1d2 100644
--- a/include/asm-s390/semaphore.h
+++ b/include/asm-s390/semaphore.h
@@ -1,107 +1 @@
+#include <linux/semaphore.h>
1/*
2 * include/asm-s390/semaphore.h
3 *
4 * S390 version
5 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
6 *
7 * Derived from "include/asm-i386/semaphore.h"
8 * (C) Copyright 1996 Linus Torvalds
9 */
10
11#ifndef _S390_SEMAPHORE_H
12#define _S390_SEMAPHORE_H
13
14#include <asm/system.h>
15#include <asm/atomic.h>
16#include <linux/wait.h>
17#include <linux/rwsem.h>
18
19struct semaphore {
20 /*
21 * Note that any negative value of count is equivalent to 0,
22 * but additionally indicates that some process(es) might be
23 * sleeping on `wait'.
24 */
25 atomic_t count;
26 wait_queue_head_t wait;
27};
28
29#define __SEMAPHORE_INITIALIZER(name,count) \
30 { ATOMIC_INIT(count), __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) }
31
32#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
33 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
34
35#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
36
37static inline void sema_init (struct semaphore *sem, int val)
38{
39 atomic_set(&sem->count, val);
40 init_waitqueue_head(&sem->wait);
41}
42
43static inline void init_MUTEX (struct semaphore *sem)
44{
45 sema_init(sem, 1);
46}
47
48static inline void init_MUTEX_LOCKED (struct semaphore *sem)
49{
50 sema_init(sem, 0);
51}
52
53asmlinkage void __down(struct semaphore * sem);
54asmlinkage int __down_interruptible(struct semaphore * sem);
55asmlinkage int __down_trylock(struct semaphore * sem);
56asmlinkage void __up(struct semaphore * sem);
57
58static inline void down(struct semaphore * sem)
59{
60 might_sleep();
61 if (atomic_dec_return(&sem->count) < 0)
62 __down(sem);
63}
64
65static inline int down_interruptible(struct semaphore * sem)
66{
67 int ret = 0;
68
69 might_sleep();
70 if (atomic_dec_return(&sem->count) < 0)
71 ret = __down_interruptible(sem);
72 return ret;
73}
74
75static inline int down_trylock(struct semaphore * sem)
76{
77 int old_val, new_val;
78
79 /*
80 * This inline assembly atomically implements the equivalent
81 * to the following C code:
82 * old_val = sem->count.counter;
83 * if ((new_val = old_val) > 0)
84 * sem->count.counter = --new_val;
85 * In the ppc code this is called atomic_dec_if_positive.
86 */
87 asm volatile(
88 " l %0,0(%3)\n"
89 "0: ltr %1,%0\n"
90 " jle 1f\n"
91 " ahi %1,-1\n"
92 " cs %0,%1,0(%3)\n"
93 " jl 0b\n"
94 "1:"
95 : "=&d" (old_val), "=&d" (new_val), "=m" (sem->count.counter)
96 : "a" (&sem->count.counter), "m" (sem->count.counter)
97 : "cc", "memory");
98 return old_val <= 0;
99}
100
101static inline void up(struct semaphore * sem)
102{
103 if (atomic_inc_return(&sem->count) <= 0)
104 __up(sem);
105}
106
107#endif
diff --git a/include/asm-sh/semaphore-helper.h b/include/asm-sh/semaphore-helper.h
deleted file mode 100644
index bd8230c369ca..000000000000
--- a/include/asm-sh/semaphore-helper.h
+++ /dev/null
@@ -1,89 +0,0 @@
1#ifndef __ASM_SH_SEMAPHORE_HELPER_H
2#define __ASM_SH_SEMAPHORE_HELPER_H
3
4/*
5 * SMP- and interrupt-safe semaphores helper functions.
6 *
7 * (C) Copyright 1996 Linus Torvalds
8 * (C) Copyright 1999 Andrea Arcangeli
9 */
10
11/*
12 * These two _must_ execute atomically wrt each other.
13 *
14 * This is trivially done with load_locked/store_cond,
15 * which we have. Let the rest of the losers suck eggs.
16 */
17static __inline__ void wake_one_more(struct semaphore * sem)
18{
19 atomic_inc((atomic_t *)&sem->sleepers);
20}
21
22static __inline__ int waking_non_zero(struct semaphore *sem)
23{
24 unsigned long flags;
25 int ret = 0;
26
27 spin_lock_irqsave(&semaphore_wake_lock, flags);
28 if (sem->sleepers > 0) {
29 sem->sleepers--;
30 ret = 1;
31 }
32 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
33 return ret;
34}
35
36/*
37 * waking_non_zero_interruptible:
38 * 1 got the lock
39 * 0 go to sleep
40 * -EINTR interrupted
41 *
42 * We must undo the sem->count decrement from down_interruptible() while we
43 * are protected by the spinlock, so that this atomic_inc() is atomic with
44 * the atomic_read() in wake_one_more(); otherwise we can race. -arca
45 */
46static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
47 struct task_struct *tsk)
48{
49 unsigned long flags;
50 int ret = 0;
51
52 spin_lock_irqsave(&semaphore_wake_lock, flags);
53 if (sem->sleepers > 0) {
54 sem->sleepers--;
55 ret = 1;
56 } else if (signal_pending(tsk)) {
57 atomic_inc(&sem->count);
58 ret = -EINTR;
59 }
60 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
61 return ret;
62}
63
64/*
65 * waking_non_zero_trylock:
66 * 1 failed to lock
67 * 0 got the lock
68 *
69 * We must undo the sem->count decrement from down_trylock() while we
70 * are protected by the spinlock, so that this atomic_inc() is atomic with
71 * the atomic_read() in wake_one_more(); otherwise we can race. -arca
72 */
73static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
74{
75 unsigned long flags;
76 int ret = 1;
77
78 spin_lock_irqsave(&semaphore_wake_lock, flags);
79 if (sem->sleepers <= 0)
80 atomic_inc(&sem->count);
81 else {
82 sem->sleepers--;
83 ret = 0;
84 }
85 spin_unlock_irqrestore(&semaphore_wake_lock, flags);
86 return ret;
87}
88
89#endif /* __ASM_SH_SEMAPHORE_HELPER_H */
diff --git a/include/asm-sh/semaphore.h b/include/asm-sh/semaphore.h
index 9e5a37c4dce2..d9b2034ed1d2 100644
--- a/include/asm-sh/semaphore.h
+++ b/include/asm-sh/semaphore.h
@@ -1,115 +1 @@
+#include <linux/semaphore.h>
1#ifndef __ASM_SH_SEMAPHORE_H
2#define __ASM_SH_SEMAPHORE_H
3
4#include <linux/linkage.h>
5
6#ifdef __KERNEL__
7/*
8 * SMP- and interrupt-safe semaphores.
9 *
10 * (C) Copyright 1996 Linus Torvalds
11 *
12 * SuperH version by Niibe Yutaka
13 * (Currently no asm implementation but generic C code...)
14 */
15
16#include <linux/spinlock.h>
17#include <linux/rwsem.h>
18#include <linux/wait.h>
19
20#include <asm/system.h>
21#include <asm/atomic.h>
22
23struct semaphore {
24 atomic_t count;
25 int sleepers;
26 wait_queue_head_t wait;
27};
28
29#define __SEMAPHORE_INITIALIZER(name, n) \
30{ \
31 .count = ATOMIC_INIT(n), \
32 .sleepers = 0, \
33 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
34}
35
36#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
37 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
38
39#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
40
41static inline void sema_init (struct semaphore *sem, int val)
42{
43/*
44 * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
45 *
46 * i'd rather use the more flexible initialization above, but sadly
47 * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
48 */
49 atomic_set(&sem->count, val);
50 sem->sleepers = 0;
51 init_waitqueue_head(&sem->wait);
52}
53
54static inline void init_MUTEX (struct semaphore *sem)
55{
56 sema_init(sem, 1);
57}
58
59static inline void init_MUTEX_LOCKED (struct semaphore *sem)
60{
61 sema_init(sem, 0);
62}
63
64#if 0
65asmlinkage void __down_failed(void /* special register calling convention */);
66asmlinkage int __down_failed_interruptible(void /* params in registers */);
67asmlinkage int __down_failed_trylock(void /* params in registers */);
68asmlinkage void __up_wakeup(void /* special register calling convention */);
69#endif
70
71asmlinkage void __down(struct semaphore * sem);
72asmlinkage int __down_interruptible(struct semaphore * sem);
73asmlinkage int __down_trylock(struct semaphore * sem);
74asmlinkage void __up(struct semaphore * sem);
75
76extern spinlock_t semaphore_wake_lock;
77
78static inline void down(struct semaphore * sem)
79{
80 might_sleep();
81 if (atomic_dec_return(&sem->count) < 0)
82 __down(sem);
83}
84
85static inline int down_interruptible(struct semaphore * sem)
86{
87 int ret = 0;
88
89 might_sleep();
90 if (atomic_dec_return(&sem->count) < 0)
91 ret = __down_interruptible(sem);
92 return ret;
93}
94
95static inline int down_trylock(struct semaphore * sem)
96{
97 int ret = 0;
98
99 if (atomic_dec_return(&sem->count) < 0)
100 ret = __down_trylock(sem);
101 return ret;
102}
103
104/*
105 * Note! This is subtle. We jump to wake people up only if
106 * the semaphore was negative (== somebody was waiting on it).
107 */
108static inline void up(struct semaphore * sem)
109{
110 if (atomic_inc_return(&sem->count) <= 0)
111 __up(sem);
112}
113
114#endif
115#endif /* __ASM_SH_SEMAPHORE_H */
diff --git a/include/asm-sparc/semaphore.h b/include/asm-sparc/semaphore.h
index 8018f9f4d497..d9b2034ed1d2 100644
--- a/include/asm-sparc/semaphore.h
+++ b/include/asm-sparc/semaphore.h
@@ -1,192 +1 @@
+#include <linux/semaphore.h>
1#ifndef _SPARC_SEMAPHORE_H
2#define _SPARC_SEMAPHORE_H
3
4/* Dinky, good for nothing, just barely irq safe, Sparc semaphores. */
5
6#ifdef __KERNEL__
7
8#include <asm/atomic.h>
9#include <linux/wait.h>
10#include <linux/rwsem.h>
11
12struct semaphore {
13 atomic24_t count;
14 int sleepers;
15 wait_queue_head_t wait;
16};
17
18#define __SEMAPHORE_INITIALIZER(name, n) \
19{ \
20 .count = ATOMIC24_INIT(n), \
21 .sleepers = 0, \
22 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
23}
24
25#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
26 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
27
28#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
29
30static inline void sema_init (struct semaphore *sem, int val)
31{
32 atomic24_set(&sem->count, val);
33 sem->sleepers = 0;
34 init_waitqueue_head(&sem->wait);
35}
36
37static inline void init_MUTEX (struct semaphore *sem)
38{
39 sema_init(sem, 1);
40}
41
42static inline void init_MUTEX_LOCKED (struct semaphore *sem)
43{
44 sema_init(sem, 0);
45}
46
47extern void __down(struct semaphore * sem);
48extern int __down_interruptible(struct semaphore * sem);
49extern int __down_trylock(struct semaphore * sem);
50extern void __up(struct semaphore * sem);
51
52static inline void down(struct semaphore * sem)
53{
54 register volatile int *ptr asm("g1");
55 register int increment asm("g2");
56
57 might_sleep();
58
59 ptr = &(sem->count.counter);
60 increment = 1;
61
62 __asm__ __volatile__(
63 "mov %%o7, %%g4\n\t"
64 "call ___atomic24_sub\n\t"
65 " add %%o7, 8, %%o7\n\t"
66 "tst %%g2\n\t"
67 "bl 2f\n\t"
68 " nop\n"
69 "1:\n\t"
70 ".subsection 2\n"
71 "2:\n\t"
72 "save %%sp, -64, %%sp\n\t"
73 "mov %%g1, %%l1\n\t"
74 "mov %%g5, %%l5\n\t"
75 "call %3\n\t"
76 " mov %%g1, %%o0\n\t"
77 "mov %%l1, %%g1\n\t"
78 "ba 1b\n\t"
79 " restore %%l5, %%g0, %%g5\n\t"
80 ".previous\n"
81 : "=&r" (increment)
82 : "0" (increment), "r" (ptr), "i" (__down)
83 : "g3", "g4", "g7", "memory", "cc");
84}
85
86static inline int down_interruptible(struct semaphore * sem)
87{
88 register volatile int *ptr asm("g1");
89 register int increment asm("g2");
90
91 might_sleep();
92
93 ptr = &(sem->count.counter);
94 increment = 1;
95
96 __asm__ __volatile__(
97 "mov %%o7, %%g4\n\t"
98 "call ___atomic24_sub\n\t"
99 " add %%o7, 8, %%o7\n\t"
100 "tst %%g2\n\t"
101 "bl 2f\n\t"
102 " clr %%g2\n"
103 "1:\n\t"
104 ".subsection 2\n"
105 "2:\n\t"
106 "save %%sp, -64, %%sp\n\t"
107 "mov %%g1, %%l1\n\t"
108 "mov %%g5, %%l5\n\t"
109 "call %3\n\t"
110 " mov %%g1, %%o0\n\t"
111 "mov %%l1, %%g1\n\t"
112 "mov %%l5, %%g5\n\t"
113 "ba 1b\n\t"
114 " restore %%o0, %%g0, %%g2\n\t"
115 ".previous\n"
116 : "=&r" (increment)
117 : "0" (increment), "r" (ptr), "i" (__down_interruptible)
118 : "g3", "g4", "g7", "memory", "cc");
119
120 return increment;
121}
122
123static inline int down_trylock(struct semaphore * sem)
124{
125 register volatile int *ptr asm("g1");
126 register int increment asm("g2");
127
128 ptr = &(sem->count.counter);
129 increment = 1;
130
131 __asm__ __volatile__(
132 "mov %%o7, %%g4\n\t"
133 "call ___atomic24_sub\n\t"
134 " add %%o7, 8, %%o7\n\t"
135 "tst %%g2\n\t"
136 "bl 2f\n\t"
137 " clr %%g2\n"
138 "1:\n\t"
139 ".subsection 2\n"
140 "2:\n\t"
141 "save %%sp, -64, %%sp\n\t"
142 "mov %%g1, %%l1\n\t"
143 "mov %%g5, %%l5\n\t"
144 "call %3\n\t"
145 " mov %%g1, %%o0\n\t"
146 "mov %%l1, %%g1\n\t"
147 "mov %%l5, %%g5\n\t"
148 "ba 1b\n\t"
149 " restore %%o0, %%g0, %%g2\n\t"
150 ".previous\n"
151 : "=&r" (increment)
152 : "0" (increment), "r" (ptr), "i" (__down_trylock)
153 : "g3", "g4", "g7", "memory", "cc");
154
155 return increment;
156}
157
158static inline void up(struct semaphore * sem)
159{
160 register volatile int *ptr asm("g1");
161 register int increment asm("g2");
162
163 ptr = &(sem->count.counter);
164 increment = 1;
165
166 __asm__ __volatile__(
167 "mov %%o7, %%g4\n\t"
168 "call ___atomic24_add\n\t"
169 " add %%o7, 8, %%o7\n\t"
170 "tst %%g2\n\t"
171 "ble 2f\n\t"
172 " nop\n"
173 "1:\n\t"
174 ".subsection 2\n"
175 "2:\n\t"
176 "save %%sp, -64, %%sp\n\t"
177 "mov %%g1, %%l1\n\t"
178 "mov %%g5, %%l5\n\t"
179 "call %3\n\t"
180 " mov %%g1, %%o0\n\t"
181 "mov %%l1, %%g1\n\t"
182 "ba 1b\n\t"
183 " restore %%l5, %%g0, %%g5\n\t"
184 ".previous\n"
185 : "=&r" (increment)
186 : "0" (increment), "r" (ptr), "i" (__up)
187 : "g3", "g4", "g7", "memory", "cc");
188}
189
190#endif /* __KERNEL__ */
191
192#endif /* !(_SPARC_SEMAPHORE_H) */
diff --git a/include/asm-sparc64/semaphore.h b/include/asm-sparc64/semaphore.h
index 7f7c0c4e024f..d9b2034ed1d2 100644
--- a/include/asm-sparc64/semaphore.h
+++ b/include/asm-sparc64/semaphore.h
@@ -1,53 +1 @@
-#ifndef _SPARC64_SEMAPHORE_H
-#define _SPARC64_SEMAPHORE_H
-
-/* These are actually reasonable on the V9.
- *
- * See asm-ppc/semaphore.h for implementation commentary,
- * only sparc64 specific issues are commented here.
- */
-#ifdef __KERNEL__
-
-#include <asm/atomic.h>
-#include <asm/system.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
-        atomic_t count;
-        wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, count) \
-        { ATOMIC_INIT(count), \
-        __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) }
-
-#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
-        struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-        atomic_set(&sem->count, val);
-        init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-        sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-        sema_init(sem, 0);
-}
-
-extern void up(struct semaphore *sem);
-extern void down(struct semaphore *sem);
-extern int down_trylock(struct semaphore *sem);
-extern int down_interruptible(struct semaphore *sem);
-
-#endif /* __KERNEL__ */
-
-#endif /* !(_SPARC64_SEMAPHORE_H) */
+#include <linux/semaphore.h>
diff --git a/include/asm-um/semaphore.h b/include/asm-um/semaphore.h
index ff13c34de421..d9b2034ed1d2 100644
--- a/include/asm-um/semaphore.h
+++ b/include/asm-um/semaphore.h
@@ -1,6 +1 @@
-#ifndef __UM_SEMAPHORE_H
-#define __UM_SEMAPHORE_H
-
-#include "asm/arch/semaphore.h"
-
-#endif
+#include <linux/semaphore.h>
diff --git a/include/asm-v850/semaphore.h b/include/asm-v850/semaphore.h
index 10ed0ccf37df..d9b2034ed1d2 100644
--- a/include/asm-v850/semaphore.h
+++ b/include/asm-v850/semaphore.h
@@ -1,84 +1 @@
-#ifndef __V850_SEMAPHORE_H__
-#define __V850_SEMAPHORE_H__
-
-#include <linux/linkage.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-#include <asm/atomic.h>
-
-struct semaphore {
-        atomic_t count;
-        int sleepers;
-        wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name,count) \
-        { ATOMIC_INIT (count), 0, \
-        __WAIT_QUEUE_HEAD_INITIALIZER ((name).wait) }
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-        struct semaphore name = __SEMAPHORE_INITIALIZER (name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC (name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-        *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-        sema_init (sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-        sema_init (sem, 0);
-}
-
-/*
- * special register calling convention
- */
-asmlinkage void __down_failed (void);
-asmlinkage int __down_interruptible_failed (void);
-asmlinkage int __down_trylock_failed (void);
-asmlinkage void __up_wakeup (void);
-
-extern void __down (struct semaphore * sem);
-extern int __down_interruptible (struct semaphore * sem);
-extern int __down_trylock (struct semaphore * sem);
-extern void __up (struct semaphore * sem);
-
-static inline void down (struct semaphore * sem)
-{
-        might_sleep();
-        if (atomic_dec_return (&sem->count) < 0)
-                __down (sem);
-}
-
-static inline int down_interruptible (struct semaphore * sem)
-{
-        int ret = 0;
-        might_sleep();
-        if (atomic_dec_return (&sem->count) < 0)
-                ret = __down_interruptible (sem);
-        return ret;
-}
-
-static inline int down_trylock (struct semaphore *sem)
-{
-        int ret = 0;
-        if (atomic_dec_return (&sem->count) < 0)
-                ret = __down_trylock (sem);
-        return ret;
-}
-
-static inline void up (struct semaphore * sem)
-{
-        if (atomic_inc_return (&sem->count) <= 0)
-                __up (sem);
-}
-
-#endif /* __V850_SEMAPHORE_H__ */
+#include <linux/semaphore.h>
diff --git a/include/asm-x86/semaphore.h b/include/asm-x86/semaphore.h
index 572c0b67a6b0..d9b2034ed1d2 100644
--- a/include/asm-x86/semaphore.h
+++ b/include/asm-x86/semaphore.h
@@ -1,5 +1 @@
-#ifdef CONFIG_X86_32
-# include "semaphore_32.h"
-#else
-# include "semaphore_64.h"
-#endif
+#include <linux/semaphore.h>
diff --git a/include/asm-x86/semaphore_32.h b/include/asm-x86/semaphore_32.h
deleted file mode 100644
index ac96d3804d0c..000000000000
--- a/include/asm-x86/semaphore_32.h
+++ /dev/null
@@ -1,175 +0,0 @@
-#ifndef _I386_SEMAPHORE_H
-#define _I386_SEMAPHORE_H
-
-#include <linux/linkage.h>
-
-#ifdef __KERNEL__
-
-/*
- * SMP- and interrupt-safe semaphores..
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in
- * the original code and to make semaphore waits
- * interruptible so that processes waiting on
- * semaphores can be killed.
- * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper
- * functions in asm/sempahore-helper.h while fixing a
- * potential and subtle race discovered by Ulrich Schmid
- * in down_interruptible(). Since I started to play here I
- * also implemented the `trylock' semaphore operation.
- * 1999-07-02 Artur Skawina <skawina@geocities.com>
- * Optimized "0(ecx)" -> "(ecx)" (the assembler does not
- * do this). Changed calling sequences from push/jmp to
- * traditional call/ret.
- * Modified 2001-01-01 Andreas Franck <afranck@gmx.de>
- * Some hacks to ensure compatibility with recent
- * GCC snapshots, to avoid stack corruption when compiling
- * with -fomit-frame-pointer. It's not sure if this will
- * be fixed in GCC, as our previous implementation was a
- * bit dubious.
- *
- * If you would like to see an analysis of this implementation, please
- * ftp to gcom.com and download the file
- * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz.
- *
- */
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
-        atomic_t count;
-        int sleepers;
-        wait_queue_head_t wait;
-};
-
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
-        .count = ATOMIC_INIT(n), \
-        .sleepers = 0, \
-        .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-        struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-/*
- * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
- *
- * i'd rather use the more flexible initialization above, but sadly
- * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
- */
-        atomic_set(&sem->count, val);
-        sem->sleepers = 0;
-        init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-        sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-        sema_init(sem, 0);
-}
-
-extern asmregparm void __down_failed(atomic_t *count_ptr);
-extern asmregparm int __down_failed_interruptible(atomic_t *count_ptr);
-extern asmregparm int __down_failed_trylock(atomic_t *count_ptr);
-extern asmregparm void __up_wakeup(atomic_t *count_ptr);
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "__down_failed" is a special asm handler that calls the C
- * routine that actually waits. See arch/i386/kernel/semaphore.c
- */
-static inline void down(struct semaphore * sem)
-{
-        might_sleep();
-        __asm__ __volatile__(
-                "# atomic down operation\n\t"
-                LOCK_PREFIX "decl %0\n\t" /* --sem->count */
-                "jns 2f\n"
-                "\tlea %0,%%eax\n\t"
-                "call __down_failed\n"
-                "2:"
-                :"+m" (sem->count)
-                :
-                :"memory","ax");
-}
-
-/*
- * Interruptible try to acquire a semaphore. If we obtained
- * it, return zero. If we were interrupted, returns -EINTR
- */
-static inline int down_interruptible(struct semaphore * sem)
-{
-        int result;
-
-        might_sleep();
-        __asm__ __volatile__(
-                "# atomic interruptible down operation\n\t"
-                "xorl %0,%0\n\t"
-                LOCK_PREFIX "decl %1\n\t" /* --sem->count */
-                "jns 2f\n\t"
-                "lea %1,%%eax\n\t"
-                "call __down_failed_interruptible\n"
-                "2:"
-                :"=&a" (result), "+m" (sem->count)
-                :
-                :"memory");
-        return result;
-}
-
-/*
- * Non-blockingly attempt to down() a semaphore.
- * Returns zero if we acquired it
- */
-static inline int down_trylock(struct semaphore * sem)
-{
-        int result;
-
-        __asm__ __volatile__(
-                "# atomic interruptible down operation\n\t"
-                "xorl %0,%0\n\t"
-                LOCK_PREFIX "decl %1\n\t" /* --sem->count */
-                "jns 2f\n\t"
-                "lea %1,%%eax\n\t"
-                "call __down_failed_trylock\n\t"
-                "2:\n"
-                :"=&a" (result), "+m" (sem->count)
-                :
-                :"memory");
-        return result;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- */
-static inline void up(struct semaphore * sem)
-{
-        __asm__ __volatile__(
-                "# atomic up operation\n\t"
-                LOCK_PREFIX "incl %0\n\t" /* ++sem->count */
-                "jg 1f\n\t"
-                "lea %0,%%eax\n\t"
-                "call __up_wakeup\n"
-                "1:"
-                :"+m" (sem->count)
-                :
-                :"memory","ax");
-}
-
-#endif
-#endif
diff --git a/include/asm-x86/semaphore_64.h b/include/asm-x86/semaphore_64.h
deleted file mode 100644
index 79694306bf7d..000000000000
--- a/include/asm-x86/semaphore_64.h
+++ /dev/null
@@ -1,180 +0,0 @@
-#ifndef _X86_64_SEMAPHORE_H
-#define _X86_64_SEMAPHORE_H
-
-#include <linux/linkage.h>
-
-#ifdef __KERNEL__
-
-/*
- * SMP- and interrupt-safe semaphores..
- *
- * (C) Copyright 1996 Linus Torvalds
- *
- * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in
- * the original code and to make semaphore waits
- * interruptible so that processes waiting on
- * semaphores can be killed.
- * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper
- * functions in asm/sempahore-helper.h while fixing a
- * potential and subtle race discovered by Ulrich Schmid
- * in down_interruptible(). Since I started to play here I
- * also implemented the `trylock' semaphore operation.
- * 1999-07-02 Artur Skawina <skawina@geocities.com>
- * Optimized "0(ecx)" -> "(ecx)" (the assembler does not
- * do this). Changed calling sequences from push/jmp to
- * traditional call/ret.
- * Modified 2001-01-01 Andreas Franck <afranck@gmx.de>
- * Some hacks to ensure compatibility with recent
- * GCC snapshots, to avoid stack corruption when compiling
- * with -fomit-frame-pointer. It's not sure if this will
- * be fixed in GCC, as our previous implementation was a
- * bit dubious.
- *
- * If you would like to see an analysis of this implementation, please
- * ftp to gcom.com and download the file
- * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz.
- *
- */
-
-#include <asm/system.h>
-#include <asm/atomic.h>
-#include <asm/rwlock.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-#include <linux/stringify.h>
-
-struct semaphore {
-        atomic_t count;
-        int sleepers;
-        wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name, n) \
-{ \
-        .count = ATOMIC_INIT(n), \
-        .sleepers = 0, \
-        .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-        struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-/*
- * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
- *
- * i'd rather use the more flexible initialization above, but sadly
- * GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
- */
-        atomic_set(&sem->count, val);
-        sem->sleepers = 0;
-        init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-        sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-        sema_init(sem, 0);
-}
-
-asmlinkage void __down_failed(void /* special register calling convention */);
-asmlinkage int __down_failed_interruptible(void /* params in registers */);
-asmlinkage int __down_failed_trylock(void /* params in registers */);
-asmlinkage void __up_wakeup(void /* special register calling convention */);
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int __down_interruptible(struct semaphore * sem);
-asmlinkage int __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-/*
- * This is ugly, but we want the default case to fall through.
- * "__down_failed" is a special asm handler that calls the C
- * routine that actually waits. See arch/x86_64/kernel/semaphore.c
- */
-static inline void down(struct semaphore * sem)
-{
-        might_sleep();
-
-        __asm__ __volatile__(
-                "# atomic down operation\n\t"
-                LOCK_PREFIX "decl %0\n\t" /* --sem->count */
-                "jns 1f\n\t"
-                "call __down_failed\n"
-                "1:"
-                :"=m" (sem->count)
-                :"D" (sem)
-                :"memory");
-}
-
-/*
- * Interruptible try to acquire a semaphore. If we obtained
- * it, return zero. If we were interrupted, returns -EINTR
- */
-static inline int down_interruptible(struct semaphore * sem)
-{
-        int result;
-
-        might_sleep();
-
-        __asm__ __volatile__(
-                "# atomic interruptible down operation\n\t"
-                "xorl %0,%0\n\t"
-                LOCK_PREFIX "decl %1\n\t" /* --sem->count */
-                "jns 2f\n\t"
-                "call __down_failed_interruptible\n"
-                "2:\n"
-                :"=&a" (result), "=m" (sem->count)
-                :"D" (sem)
-                :"memory");
-        return result;
-}
-
-/*
- * Non-blockingly attempt to down() a semaphore.
- * Returns zero if we acquired it
- */
-static inline int down_trylock(struct semaphore * sem)
-{
-        int result;
-
-        __asm__ __volatile__(
-                "# atomic interruptible down operation\n\t"
-                "xorl %0,%0\n\t"
-                LOCK_PREFIX "decl %1\n\t" /* --sem->count */
-                "jns 2f\n\t"
-                "call __down_failed_trylock\n\t"
-                "2:\n"
-                :"=&a" (result), "=m" (sem->count)
-                :"D" (sem)
-                :"memory","cc");
-        return result;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- * The default case (no contention) will result in NO
- * jumps for both down() and up().
- */
-static inline void up(struct semaphore * sem)
-{
-        __asm__ __volatile__(
-                "# atomic up operation\n\t"
-                LOCK_PREFIX "incl %0\n\t" /* ++sem->count */
-                "jg 1f\n\t"
-                "call __up_wakeup\n"
-                "1:"
-                :"=m" (sem->count)
-                :"D" (sem)
-                :"memory");
-}
-#endif /* __KERNEL__ */
-#endif
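
For readers not fluent in x86 inline assembly, here is a userspace sketch (not part of the patch; xdown() and xdown_slow() are invented names) of the fast path both deleted x86 headers implement: an atomic locked decrement followed by a branch-on-sign to an out-of-line contention handler, so the uncontended case costs one locked instruction and no taken branch. The GCC __atomic builtin stands in for LOCK decl.

#include <stdio.h>

static int slow_calls;

static void xdown_slow(int *count)      /* stand-in for __down_failed() */
{
        slow_calls++;
}

static void xdown(int *count)
{
        /* mirrors "LOCK decl %0; jns 1f; call __down_failed; 1:" */
        if (__atomic_sub_fetch(count, 1, __ATOMIC_ACQUIRE) < 0)
                xdown_slow(count);
}

int main(void)
{
        int count = 1;

        xdown(&count);  /* 1 -> 0: sign clear, fast path only */
        xdown(&count);  /* 0 -> -1: negative, contended slow path */
        printf("count=%d, slow path taken %d time(s)\n", count, slow_calls);
        return 0;
}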
diff --git a/include/asm-xtensa/semaphore.h b/include/asm-xtensa/semaphore.h
index 3e04167cd9dc..d9b2034ed1d2 100644
--- a/include/asm-xtensa/semaphore.h
+++ b/include/asm-xtensa/semaphore.h
@@ -1,99 +1 @@
-/*
- * linux/include/asm-xtensa/semaphore.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_SEMAPHORE_H
-#define _XTENSA_SEMAPHORE_H
-
-#include <asm/atomic.h>
-#include <asm/system.h>
-#include <linux/wait.h>
-#include <linux/rwsem.h>
-
-struct semaphore {
-        atomic_t count;
-        int sleepers;
-        wait_queue_head_t wait;
-};
-
-#define __SEMAPHORE_INITIALIZER(name,n) \
-{ \
-        .count = ATOMIC_INIT(n), \
-        .sleepers = 0, \
-        .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-}
-
-#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
-        struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
-
-#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
-
-static inline void sema_init (struct semaphore *sem, int val)
-{
-        atomic_set(&sem->count, val);
-        sem->sleepers = 0;
-        init_waitqueue_head(&sem->wait);
-}
-
-static inline void init_MUTEX (struct semaphore *sem)
-{
-        sema_init(sem, 1);
-}
-
-static inline void init_MUTEX_LOCKED (struct semaphore *sem)
-{
-        sema_init(sem, 0);
-}
-
-asmlinkage void __down(struct semaphore * sem);
-asmlinkage int __down_interruptible(struct semaphore * sem);
-asmlinkage int __down_trylock(struct semaphore * sem);
-asmlinkage void __up(struct semaphore * sem);
-
-extern spinlock_t semaphore_wake_lock;
-
-static inline void down(struct semaphore * sem)
-{
-        might_sleep();
-
-        if (atomic_sub_return(1, &sem->count) < 0)
-                __down(sem);
-}
-
-static inline int down_interruptible(struct semaphore * sem)
-{
-        int ret = 0;
-
-        might_sleep();
-
-        if (atomic_sub_return(1, &sem->count) < 0)
-                ret = __down_interruptible(sem);
-        return ret;
-}
-
-static inline int down_trylock(struct semaphore * sem)
-{
-        int ret = 0;
-
-        if (atomic_sub_return(1, &sem->count) < 0)
-                ret = __down_trylock(sem);
-        return ret;
-}
-
-/*
- * Note! This is subtle. We jump to wake people up only if
- * the semaphore was negative (== somebody was waiting on it).
- */
-static inline void up(struct semaphore * sem)
-{
-        if (atomic_add_return(1, &sem->count) <= 0)
-                __up(sem);
-}
-
-#endif /* _XTENSA_SEMAPHORE_H */
+#include <linux/semaphore.h>
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
new file mode 100644
index 000000000000..b3c691b089b2
--- /dev/null
+++ b/include/linux/semaphore.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2008 Intel Corporation
+ * Author: Matthew Wilcox <willy@linux.intel.com>
+ *
+ * Distributed under the terms of the GNU GPL, version 2
+ *
+ * Counting semaphores allow up to <n> tasks to acquire the semaphore
+ * simultaneously.
+ */
+#ifndef __LINUX_SEMAPHORE_H
+#define __LINUX_SEMAPHORE_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+/*
+ * The spinlock controls access to the other members of the semaphore.
+ * 'count' is decremented by every task which calls down*() and incremented
+ * by every call to up(). Thus, if it is positive, it indicates how many
+ * more tasks may acquire the lock. If it is negative, it indicates how
+ * many tasks are waiting for the lock. Tasks waiting for the lock are
+ * kept on the wait_list.
+ */
+struct semaphore {
+        spinlock_t lock;
+        int count;
+        struct list_head wait_list;
+};
+
+#define __SEMAPHORE_INITIALIZER(name, n) \
+{ \
+        .lock = __SPIN_LOCK_UNLOCKED((name).lock), \
+        .count = n, \
+        .wait_list = LIST_HEAD_INIT((name).wait_list), \
+}
+
+#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
+        struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
+
+#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
+
+static inline void sema_init(struct semaphore *sem, int val)
+{
+        static struct lock_class_key __key;
+        *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
+        lockdep_init_map(&sem->lock.dep_map, "semaphore->lock", &__key, 0);
+}
+
+#define init_MUTEX(sem) sema_init(sem, 1)
+#define init_MUTEX_LOCKED(sem) sema_init(sem, 0)
+
+/*
+ * Attempt to acquire the semaphore. If another task is already holding the
+ * semaphore, sleep until the semaphore is released.
+ */
+extern void down(struct semaphore *sem);
+
+/*
+ * As down(), except the sleep may be interrupted by a signal. If it is,
+ * this function will return -EINTR.
+ */
+extern int __must_check down_interruptible(struct semaphore *sem);
+
+/*
+ * As down(), except this function will not sleep. It will return 0 if it
+ * acquired the semaphore and 1 if the semaphore was contended. This
+ * function may be called from any context, including interrupt and softirq.
+ */
+extern int __must_check down_trylock(struct semaphore *sem);
+
+/*
+ * Release the semaphore. Unlike mutexes, up() may be called from any
+ * context and even by tasks which have never called down().
+ */
+extern void up(struct semaphore *sem);
+
+#endif /* __LINUX_SEMAPHORE_H */
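
A minimal usage sketch of the new generic API (hypothetical caller code, not part of the patch; the frob_* names are invented). Note that down_trylock() returns nonzero on failure, the opposite convention from spin_trylock() and mutex_trylock().

/* Hypothetical kernel-context sketch using only the API declared above. */
static struct semaphore frob_sem;

static int frob_init(void)
{
        sema_init(&frob_sem, 1);        /* count of 1, i.e. a mutex */
        return 0;
}

static int frob_ioctl(void)
{
        if (down_interruptible(&frob_sem))
                return -EINTR;          /* sleep was interrupted by a signal */
        /* ... critical section ... */
        up(&frob_sem);
        return 0;
}

static int frob_irq_path(void)
{
        if (down_trylock(&frob_sem))    /* nonzero means failure here */
                return -EBUSY;
        /* ... */
        up(&frob_sem);
        return 0;
}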
diff --git a/kernel/Makefile b/kernel/Makefile
index 6c584c55a6e9..f45c69e69688 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -8,7 +8,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
            signal.o sys.o kmod.o workqueue.o pid.o \
            rcupdate.o extable.o params.o posix-timers.o \
            kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
-           hrtimer.o rwsem.o nsproxy.o srcu.o \
+           hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
            notifier.o ksysfs.o pm_qos_params.o
 
 obj-$(CONFIG_SYSCTL) += sysctl_check.o
diff --git a/kernel/semaphore.c b/kernel/semaphore.c
new file mode 100644
index 000000000000..d5a72702f261
--- /dev/null
+++ b/kernel/semaphore.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2008 Intel Corporation
+ * Author: Matthew Wilcox <willy@linux.intel.com>
+ *
+ * Distributed under the terms of the GNU GPL, version 2
+ */
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+#include <linux/spinlock.h>
+
+/*
+ * Some notes on the implementation:
+ *
+ * down_trylock() and up() can be called from interrupt context.
+ * So we have to disable interrupts when taking the lock.
+ *
+ * The ->count variable, if positive, defines how many more tasks can
+ * acquire the semaphore. If negative, it represents how many tasks are
+ * waiting on the semaphore (*). If zero, no tasks are waiting, and no more
+ * tasks can acquire the semaphore.
+ *
+ * (*) Except for the window between one task calling up() and the task
+ * sleeping in a __down_common() waking up. In order to avoid a third task
+ * coming in and stealing the second task's wakeup, we leave the ->count
+ * negative. If we have a more complex situation, the ->count may become
+ * zero or negative (eg a semaphore with count = 2, three tasks attempt to
+ * acquire it, one sleeps, two finish and call up(), the second task to call
+ * up() notices that the list is empty and just increments count).
+ */
+
+static noinline void __down(struct semaphore *sem);
+static noinline int __down_interruptible(struct semaphore *sem);
+static noinline void __up(struct semaphore *sem);
+
+void down(struct semaphore *sem)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&sem->lock, flags);
+        if (unlikely(sem->count-- <= 0))
+                __down(sem);
+        spin_unlock_irqrestore(&sem->lock, flags);
+}
+EXPORT_SYMBOL(down);
+
+int down_interruptible(struct semaphore *sem)
+{
+        unsigned long flags;
+        int result = 0;
+
+        spin_lock_irqsave(&sem->lock, flags);
+        if (unlikely(sem->count-- <= 0))
+                result = __down_interruptible(sem);
+        spin_unlock_irqrestore(&sem->lock, flags);
+
+        return result;
+}
+EXPORT_SYMBOL(down_interruptible);
+
+/**
+ * down_trylock - try to acquire the semaphore, without waiting
+ * @sem: the semaphore to be acquired
+ *
+ * Try to acquire the semaphore atomically. Returns 0 if the mutex has
+ * been acquired successfully and 1 if it is contended.
+ *
+ * NOTE: This return value is inverted from both spin_trylock and
+ * mutex_trylock! Be careful about this when converting code.
+ *
+ * Unlike mutex_trylock, this function can be used from interrupt context,
+ * and the semaphore can be released by any task or interrupt.
+ */
+int down_trylock(struct semaphore *sem)
+{
+        unsigned long flags;
+        int count;
+
+        spin_lock_irqsave(&sem->lock, flags);
+        count = sem->count - 1;
+        if (likely(count >= 0))
+                sem->count = count;
+        spin_unlock_irqrestore(&sem->lock, flags);
+
+        return (count < 0);
+}
+EXPORT_SYMBOL(down_trylock);
+
+void up(struct semaphore *sem)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&sem->lock, flags);
+        if (likely(sem->count >= 0))
+                sem->count++;
+        else
+                __up(sem);
+        spin_unlock_irqrestore(&sem->lock, flags);
+}
+EXPORT_SYMBOL(up);
+
+/* Functions for the contended case */
+
+struct semaphore_waiter {
+        struct list_head list;
+        struct task_struct *task;
+        int up;
+};
+
+/*
+ * Wake up a process waiting on a semaphore. We need to call this from both
+ * __up and __down_common as it's possible to race a task into the semaphore
+ * if it comes in at just the right time between two tasks calling up() and
+ * a third task waking up. This function assumes the wait_list is already
+ * checked for being non-empty.
+ */
+static noinline void __sched __up_down_common(struct semaphore *sem)
+{
+        struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
+                                                struct semaphore_waiter, list);
+        list_del(&waiter->list);
+        waiter->up = 1;
+        wake_up_process(waiter->task);
+}
+
+/*
+ * Because this function is inlined, the 'state' parameter will be constant,
+ * and thus optimised away by the compiler.
+ */
+static inline int __sched __down_common(struct semaphore *sem, long state)
+{
+        int result = 0;
+        struct task_struct *task = current;
+        struct semaphore_waiter waiter;
+
+        list_add_tail(&waiter.list, &sem->wait_list);
+        waiter.task = task;
+        waiter.up = 0;
+
+        for (;;) {
+                if (state == TASK_INTERRUPTIBLE && signal_pending(task))
+                        goto interrupted;
+                __set_task_state(task, state);
+                spin_unlock_irq(&sem->lock);
+                schedule();
+                spin_lock_irq(&sem->lock);
+                if (waiter.up)
+                        goto woken;
+        }
+
+ interrupted:
+        list_del(&waiter.list);
+        result = -EINTR;
+ woken:
+        /*
+         * Account for the process which woke us up. For the case where
+         * we're interrupted, we need to increment the count on our own
+         * behalf. I don't believe we can hit the case where the
+         * sem->count hits zero, *and* there's a second task sleeping,
+         * but it doesn't hurt, that's not a commonly exercised path and
+         * it's not a performance path either.
+         */
+        if (unlikely((++sem->count >= 0) && !list_empty(&sem->wait_list)))
+                __up_down_common(sem);
+        return result;
+}
+
+static noinline void __sched __down(struct semaphore *sem)
+{
+        __down_common(sem, TASK_UNINTERRUPTIBLE);
+}
+
+static noinline int __sched __down_interruptible(struct semaphore *sem)
+{
+        return __down_common(sem, TASK_INTERRUPTIBLE);
+}
+
+static noinline void __sched __up(struct semaphore *sem)
+{
+        if (unlikely(list_empty(&sem->wait_list)))
+                sem->count++;
+        else
+                __up_down_common(sem);
+}
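
The following userspace model (an assumption-laden sketch, not part of the patch; ksem and struct waiter are invented names) mirrors the algorithm above: one lock protects the count and a FIFO wait list, and a per-waiter 'up' flag hands the semaphore directly to the first waiter, which is why a late arrival cannot steal the wakeup. The interruptible path and the __up_down_common() re-check are omitted for brevity.

#include <pthread.h>
#include <stdio.h>

struct waiter {
        struct waiter *next;
        pthread_cond_t cond;
        int up;                         /* like semaphore_waiter.up */
};

struct ksem {
        pthread_mutex_t lock;           /* stands in for sem->lock */
        int count;
        struct waiter *head, *tail;     /* stands in for sem->wait_list */
};

static void ksem_down(struct ksem *s)
{
        pthread_mutex_lock(&s->lock);
        if (s->count-- > 0) {           /* uncontended fast path */
                pthread_mutex_unlock(&s->lock);
                return;
        }
        struct waiter w = { .next = NULL, .up = 0 };
        pthread_cond_init(&w.cond, NULL);
        if (s->tail)                    /* enqueue at the tail, FIFO order */
                s->tail->next = &w;
        else
                s->head = &w;
        s->tail = &w;
        while (!w.up)                   /* the waiter.up check after schedule() */
                pthread_cond_wait(&w.cond, &s->lock);
        s->count++;                     /* account for the task that woke us */
        pthread_cond_destroy(&w.cond);
        pthread_mutex_unlock(&s->lock);
}

static void ksem_up(struct ksem *s)
{
        pthread_mutex_lock(&s->lock);
        if (s->count >= 0 || !s->head) {
                s->count++;             /* nobody to hand off to */
        } else {
                struct waiter *w = s->head;     /* hand off; count stays < 0 */
                s->head = w->next;
                if (!s->head)
                        s->tail = NULL;
                w->up = 1;
                pthread_cond_signal(&w->cond);
        }
        pthread_mutex_unlock(&s->lock);
}

int main(void)
{
        struct ksem s = { PTHREAD_MUTEX_INITIALIZER, 1, NULL, NULL };

        ksem_down(&s);          /* count 1 -> 0, acquired uncontended */
        ksem_up(&s);            /* count 0 -> 1, no waiters */
        printf("final count=%d\n", s.count);
        return 0;
}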
diff --git a/lib/Makefile b/lib/Makefile
index 23de261a4c83..28dba90d5020 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -29,7 +29,6 @@ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
 lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
-lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o
 lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
diff --git a/lib/semaphore-sleepers.c b/lib/semaphore-sleepers.c
deleted file mode 100644
index 0198782cdacb..000000000000
--- a/lib/semaphore-sleepers.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * i386 and x86-64 semaphore implementation.
- *
- * (C) Copyright 1999 Linus Torvalds
- *
- * Portions Copyright 1999 Red Hat, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
- */
-#include <linux/sched.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <asm/semaphore.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * "sleeping" and the contention routine ordering is protected
- * by the spinlock in the semaphore's waitqueue head.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- * - only on a boundary condition do we need to care. When we go
- *   from a negative count to a non-negative, we wake people up.
- * - when we go from a non-negative count to a negative do we
- *   (a) synchronize with the "sleeper" count and (b) make sure
- *   that we're on the wakeup list before we synchronize so that
- *   we cannot lose wakeup events.
- */
-
-void __up(struct semaphore *sem)
-{
-        wake_up(&sem->wait);
-}
-
-void __sched __down(struct semaphore *sem)
-{
-        struct task_struct *tsk = current;
-        DECLARE_WAITQUEUE(wait, tsk);
-        unsigned long flags;
-
-        tsk->state = TASK_UNINTERRUPTIBLE;
-        spin_lock_irqsave(&sem->wait.lock, flags);
-        add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-        sem->sleepers++;
-        for (;;) {
-                int sleepers = sem->sleepers;
-
-                /*
-                 * Add "everybody else" into it. They aren't
-                 * playing, because we own the spinlock in
-                 * the wait_queue_head.
-                 */
-                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-                        sem->sleepers = 0;
-                        break;
-                }
-                sem->sleepers = 1;      /* us - see -1 above */
-                spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-                schedule();
-
-                spin_lock_irqsave(&sem->wait.lock, flags);
-                tsk->state = TASK_UNINTERRUPTIBLE;
-        }
-        remove_wait_queue_locked(&sem->wait, &wait);
-        wake_up_locked(&sem->wait);
-        spin_unlock_irqrestore(&sem->wait.lock, flags);
-        tsk->state = TASK_RUNNING;
-}
-
-int __sched __down_interruptible(struct semaphore *sem)
-{
-        int retval = 0;
-        struct task_struct *tsk = current;
-        DECLARE_WAITQUEUE(wait, tsk);
-        unsigned long flags;
-
-        tsk->state = TASK_INTERRUPTIBLE;
-        spin_lock_irqsave(&sem->wait.lock, flags);
-        add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-        sem->sleepers++;
-        for (;;) {
-                int sleepers = sem->sleepers;
-
-                /*
-                 * With signals pending, this turns into
-                 * the trylock failure case - we won't be
-                 * sleeping, and we* can't get the lock as
-                 * it has contention. Just correct the count
-                 * and exit.
-                 */
-                if (signal_pending(current)) {
-                        retval = -EINTR;
-                        sem->sleepers = 0;
-                        atomic_add(sleepers, &sem->count);
-                        break;
-                }
-
-                /*
-                 * Add "everybody else" into it. They aren't
-                 * playing, because we own the spinlock in
-                 * wait_queue_head. The "-1" is because we're
-                 * still hoping to get the semaphore.
-                 */
-                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-                        sem->sleepers = 0;
-                        break;
-                }
-                sem->sleepers = 1;      /* us - see -1 above */
-                spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-                schedule();
-
-                spin_lock_irqsave(&sem->wait.lock, flags);
-                tsk->state = TASK_INTERRUPTIBLE;
-        }
-        remove_wait_queue_locked(&sem->wait, &wait);
-        wake_up_locked(&sem->wait);
-        spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-        tsk->state = TASK_RUNNING;
-        return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for
- * having decremented the count.
- *
- * We could have done the trylock with a
- * single "cmpxchg" without failure cases,
- * but then it wouldn't work on a 386.
- */
-int __down_trylock(struct semaphore *sem)
-{
-        int sleepers;
-        unsigned long flags;
-
-        spin_lock_irqsave(&sem->wait.lock, flags);
-        sleepers = sem->sleepers + 1;
-        sem->sleepers = 0;
-
-        /*
-         * Add "everybody else" and us into it. They aren't
-         * playing, because we own the spinlock in the
-         * wait_queue_head.
-         */
-        if (!atomic_add_negative(sleepers, &sem->count)) {
-                wake_up_locked(&sem->wait);
-        }
-
-        spin_unlock_irqrestore(&sem->wait.lock, flags);
-        return 1;
-}
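
For contrast with the generic implementation that replaces it, here is a single-threaded replay (illustrative only, not part of the patch; the T0/T1 labels are invented) of a two-task mutex contention under the old sleepers algorithm, showing how the "fold" of sleepers - 1 keeps count pinned at -1 while anyone sleeps.

#include <stdio.h>

int main(void)
{
        int count = 1, sleepers = 0;

        count--;                /* T0: down(), 1 -> 0, uncontended */
        count--;                /* T1: down(), 0 -> -1, enters __down() */
        sleepers++;             /* T1: sem->sleepers++ */
        count += sleepers - 1;  /* T1: fold "everybody else": count stays -1 */
        sleepers = 1;           /* T1: still negative, so it sleeps */
        printf("T1 asleep: count=%d sleepers=%d\n", count, sleepers);

        count++;                /* T0: up(), -1 -> 0, wakes T1 */
        count += sleepers - 1;  /* T1: retry the fold: 0, non-negative */
        sleepers = 0;           /* T1: owns the semaphore */
        printf("T1 owns:   count=%d sleepers=%d\n", count, sleepers);
        return 0;
}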