Diffstat (limited to 'arch')
75 files changed, 26 insertions(+), 3741 deletions(-)
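Every per-architecture semaphore implementation deleted below is replaced by a single generic one in kernel/semaphore.c; that file lives outside arch/, so it does not show up in this diffstat. As a hedged sketch of what takes over (modelled on the 2.6.26-era generic code, not quoted from this commit), both paths serialize on the semaphore's own spinlock:

	/* Sketch of the generic counting semaphore replacing the per-arch
	 * files below; modelled on kernel/semaphore.c, not verbatim. */
	struct semaphore {
		spinlock_t		lock;		/* protects count and wait_list */
		unsigned int		count;		/* tokens currently available */
		struct list_head	wait_list;	/* tasks sleeping for a token */
	};

	void down(struct semaphore *sem)
	{
		unsigned long flags;

		spin_lock_irqsave(&sem->lock, flags);
		if (likely(sem->count > 0))
			sem->count--;		/* fast path: take a token */
		else
			__down(sem);		/* slow path: queue and sleep */
		spin_unlock_irqrestore(&sem->lock, flags);
	}

	void up(struct semaphore *sem)
	{
		unsigned long flags;

		spin_lock_irqsave(&sem->lock, flags);
		if (likely(list_empty(&sem->wait_list)))
			sem->count++;		/* nobody waiting: bank the token */
		else
			__up(sem);		/* hand the token to the first waiter */
		spin_unlock_irqrestore(&sem->lock, flags);
	}

This trades each architecture's hand-rolled lock-free fast path for one spinlock round trip, which is what makes all of the assembly below removable.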
diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile
index dccf05245d4d..ac706c1d7ada 100644
--- a/arch/alpha/kernel/Makefile
+++ b/arch/alpha/kernel/Makefile
@@ -7,7 +7,7 @@ EXTRA_AFLAGS := $(KBUILD_CFLAGS)
 EXTRA_CFLAGS := -Werror -Wno-sign-compare
 
 obj-y    := entry.o traps.o process.o init_task.o osf_sys.o irq.o \
-	    irq_alpha.o signal.o setup.o ptrace.o time.o semaphore.o \
+	    irq_alpha.o signal.o setup.o ptrace.o time.o \
 	    alpha_ksyms.o systbls.o err_common.o io.o
 
 obj-$(CONFIG_VGA_HOSE)	+= console.o
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c
index e9762a33b043..d96e742d4dc2 100644
--- a/arch/alpha/kernel/alpha_ksyms.c
+++ b/arch/alpha/kernel/alpha_ksyms.c
@@ -77,15 +77,6 @@ EXPORT_SYMBOL(__do_clear_user);
 EXPORT_SYMBOL(__strncpy_from_user);
 EXPORT_SYMBOL(__strnlen_user);
 
-/* Semaphore helper functions. */
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_failed_interruptible);
-EXPORT_SYMBOL(__up_wakeup);
-EXPORT_SYMBOL(down);
-EXPORT_SYMBOL(down_interruptible);
-EXPORT_SYMBOL(down_trylock);
-EXPORT_SYMBOL(up);
-
 /*
  * SMP-specific symbols.
  */
diff --git a/arch/alpha/kernel/semaphore.c b/arch/alpha/kernel/semaphore.c
deleted file mode 100644
index 8d2982aa1b8d..000000000000
--- a/arch/alpha/kernel/semaphore.c
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * Alpha semaphore implementation.
- *
- * (C) Copyright 1996 Linus Torvalds
- * (C) Copyright 1999, 2000 Richard Henderson
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-
-/*
- * This is basically the PPC semaphore scheme ported to use
- * the Alpha ll/sc sequences, so see the PPC code for
- * credits.
- */
-
-/*
- * Atomically update sem->count.
- * This does the equivalent of the following:
- *
- *	old_count = sem->count;
- *	tmp = MAX(old_count, 0) + incr;
- *	sem->count = tmp;
- *	return old_count;
- */
-static inline int __sem_update_count(struct semaphore *sem, int incr)
-{
-	long old_count, tmp = 0;
-
-	__asm__ __volatile__(
-	"1:	ldl_l	%0,%2\n"
-	"	cmovgt	%0,%0,%1\n"
-	"	addl	%1,%3,%1\n"
-	"	stl_c	%1,%2\n"
-	"	beq	%1,2f\n"
-	"	mb\n"
-	".subsection 2\n"
-	"2:	br	1b\n"
-	".previous"
-	: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
-	: "Ir" (incr), "1" (tmp), "m" (sem->count));
-
-	return old_count;
-}
-
-/*
- * Perform the "down" function.  Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from down, the return is ignored and the wait loop is
- * not interruptible.  This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from down_interruptible, the return value gets checked
- * upon return.  If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- */
-
-void __sched
-__down_failed(struct semaphore *sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down failed(%p)\n",
-	       tsk->comm, task_pid_nr(tsk), sem);
-#endif
-
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	wmb();
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	/*
-	 * Try to get the semaphore.  If the count is > 0, then we've
-	 * got the semaphore; we decrement count and exit the loop.
-	 * If the count is 0 or negative, we set it to -1, indicating
-	 * that we are asleep, and then sleep.
-	 */
-	while (__sem_update_count(sem, -1) <= 0) {
-		schedule();
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-	}
-	remove_wait_queue(&sem->wait, &wait);
-	tsk->state = TASK_RUNNING;
-
-	/*
-	 * If there are any more sleepers, wake one of them up so
-	 * that it can either get the semaphore, or set count to -1
-	 * indicating that there are still processes sleeping.
-	 */
-	wake_up(&sem->wait);
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down acquired(%p)\n",
-	       tsk->comm, task_pid_nr(tsk), sem);
-#endif
-}
-
-int __sched
-__down_failed_interruptible(struct semaphore *sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	long ret = 0;
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down failed(%p)\n",
-	       tsk->comm, task_pid_nr(tsk), sem);
-#endif
-
-	tsk->state = TASK_INTERRUPTIBLE;
-	wmb();
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	while (__sem_update_count(sem, -1) <= 0) {
-		if (signal_pending(current)) {
-			/*
-			 * A signal is pending - give up trying.
-			 * Set sem->count to 0 if it is negative,
-			 * since we are no longer sleeping.
-			 */
-			__sem_update_count(sem, 0);
-			ret = -EINTR;
-			break;
-		}
-		schedule();
-		set_task_state(tsk, TASK_INTERRUPTIBLE);
-	}
-
-	remove_wait_queue(&sem->wait, &wait);
-	tsk->state = TASK_RUNNING;
-	wake_up(&sem->wait);
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down %s(%p)\n",
-	       current->comm, task_pid_nr(current),
-	       (ret < 0 ? "interrupted" : "acquired"), sem);
-#endif
-	return ret;
-}
-
-void
-__up_wakeup(struct semaphore *sem)
-{
-	/*
-	 * Note that we incremented count in up() before we came here,
-	 * but that was ineffective since the result was <= 0, and
-	 * any negative value of count is equivalent to 0.
-	 * This ends up setting count to 1, unless count is now > 0
-	 * (i.e. because some other cpu has called up() in the meantime),
-	 * in which case we just increment count.
-	 */
-	__sem_update_count(sem, 1);
-	wake_up(&sem->wait);
-}
-
-void __sched
-down(struct semaphore *sem)
-{
-#ifdef WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down(%p) <count=%d> from %p\n",
-	       current->comm, task_pid_nr(current), sem,
-	       atomic_read(&sem->count), __builtin_return_address(0));
-#endif
-	__down(sem);
-}
-
-int __sched
-down_interruptible(struct semaphore *sem)
-{
-#ifdef WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down(%p) <count=%d> from %p\n",
-	       current->comm, task_pid_nr(current), sem,
-	       atomic_read(&sem->count), __builtin_return_address(0));
-#endif
-	return __down_interruptible(sem);
-}
-
-int
-down_trylock(struct semaphore *sem)
-{
-	int ret;
-
-#ifdef WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
-
-	ret = __down_trylock(sem);
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): down_trylock %s from %p\n",
-	       current->comm, task_pid_nr(current),
-	       ret ? "failed" : "acquired",
-	       __builtin_return_address(0));
-#endif
-
-	return ret;
-}
-
-void
-up(struct semaphore *sem)
-{
-#ifdef WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
-#ifdef CONFIG_DEBUG_SEMAPHORE
-	printk("%s(%d): up(%p) <count=%d> from %p\n",
-	       current->comm, task_pid_nr(current), sem,
-	       atomic_read(&sem->count), __builtin_return_address(0));
-#endif
-	__up(sem);
-}
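The ll/sc loop in __sem_update_count() above implements exactly the pseudocode in its comment. For readers without Alpha assembly, a hedged portable C rendering of the same primitive (illustrative only, using GCC's __atomic builtins rather than kernel primitives):

	/* tmp = MAX(old_count, 0) + incr, stored back atomically; returns
	 * the old count.  A CAS loop standing in for ldl_l/stl_c. */
	static int sem_update_count(int *count, int incr)
	{
		int old = __atomic_load_n(count, __ATOMIC_RELAXED);
		int new;

		do {
			new = (old > 0 ? old : 0) + incr;
		} while (!__atomic_compare_exchange_n(count, &old, new, 1,
						      __ATOMIC_ACQUIRE,
						      __ATOMIC_RELAXED));
		return old;
	}

So __sem_update_count(sem, -1) returns a value greater than zero only when a token was actually available, which is the acquisition test used by both down paths above.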
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 00d44c6fbfe9..6235f72a14f0 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -7,7 +7,7 @@ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
 # Object file lists.
 
 obj-y		:= compat.o entry-armv.o entry-common.o irq.o \
-		   process.o ptrace.o semaphore.o setup.o signal.o \
+		   process.o ptrace.o setup.o signal.o \
 		   sys_arm.o stacktrace.o time.o traps.o
 
 obj-$(CONFIG_ISA_DMA_API)	+= dma.o
diff --git a/arch/arm/kernel/semaphore.c b/arch/arm/kernel/semaphore.c
deleted file mode 100644
index 981fe5c6ccbe..000000000000
--- a/arch/arm/kernel/semaphore.c
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * ARM semaphore implementation, taken from
- *
- * i386 semaphore implementation.
- *
- * (C) Copyright 1999 Linus Torvalds
- *
- * Modified for ARM by Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-
-#include <asm/semaphore.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * "sleeping" and the contention routine ordering is
- * protected by the semaphore spinlock.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- *  - only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - when we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleeper" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-void __up(struct semaphore *sem)
-{
-	wake_up(&sem->wait);
-}
-
-static DEFINE_SPINLOCK(semaphore_lock);
-
-void __sched __down(struct semaphore * sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	spin_lock_irq(&semaphore_lock);
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irq(&semaphore_lock);
-
-		schedule();
-		tsk->state = TASK_UNINTERRUPTIBLE;
-		spin_lock_irq(&semaphore_lock);
-	}
-	spin_unlock_irq(&semaphore_lock);
-	remove_wait_queue(&sem->wait, &wait);
-	tsk->state = TASK_RUNNING;
-	wake_up(&sem->wait);
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	int retval = 0;
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	tsk->state = TASK_INTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	spin_lock_irq(&semaphore_lock);
-	sem->sleepers ++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * With signals pending, this turns into
-		 * the trylock failure case - we won't be
-		 * sleeping, and we* can't get the lock as
-		 * it has contention. Just correct the count
-		 * and exit.
-		 */
-		if (signal_pending(current)) {
-			retval = -EINTR;
-			sem->sleepers = 0;
-			atomic_add(sleepers, &sem->count);
-			break;
-		}
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock. The
-		 * "-1" is because we're still hoping to get
-		 * the lock.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irq(&semaphore_lock);
-
-		schedule();
-		tsk->state = TASK_INTERRUPTIBLE;
-		spin_lock_irq(&semaphore_lock);
-	}
-	spin_unlock_irq(&semaphore_lock);
-	tsk->state = TASK_RUNNING;
-	remove_wait_queue(&sem->wait, &wait);
-	wake_up(&sem->wait);
-	return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for
- * having decremented the count.
- *
- * We could have done the trylock with a
- * single "cmpxchg" without failure cases,
- * but then it wouldn't work on a 386.
- */
-int __down_trylock(struct semaphore * sem)
-{
-	int sleepers;
-	unsigned long flags;
-
-	spin_lock_irqsave(&semaphore_lock, flags);
-	sleepers = sem->sleepers + 1;
-	sem->sleepers = 0;
-
-	/*
-	 * Add "everybody else" and us into it. They aren't
-	 * playing, because we own the spinlock.
-	 */
-	if (!atomic_add_negative(sleepers, &sem->count))
-		wake_up(&sem->wait);
-
-	spin_unlock_irqrestore(&semaphore_lock, flags);
-	return 1;
-}
-
-/*
- * The semaphore operations have a special calling sequence that
- * allow us to do a simpler in-line version of them. These routines
- * need to convert that sequence back into the C sequence when
- * there is contention on the semaphore.
- *
- * ip contains the semaphore pointer on entry. Save the C-clobbered
- * registers (r0 to r3 and lr), but not ip, as we use it as a return
- * value in some cases..
- * To remain AAPCS compliant (64-bit stack align) we save r4 as well.
- */
-asm("	.section .sched.text,\"ax\",%progbits	\n\
-	.align	5				\n\
-	.globl	__down_failed			\n\
-__down_failed:					\n\
-	stmfd	sp!, {r0 - r4, lr}		\n\
-	mov	r0, ip				\n\
-	bl	__down				\n\
-	ldmfd	sp!, {r0 - r4, pc}		\n\
-						\n\
-	.align	5				\n\
-	.globl	__down_interruptible_failed	\n\
-__down_interruptible_failed:			\n\
-	stmfd	sp!, {r0 - r4, lr}		\n\
-	mov	r0, ip				\n\
-	bl	__down_interruptible		\n\
-	mov	ip, r0				\n\
-	ldmfd	sp!, {r0 - r4, pc}		\n\
-						\n\
-	.align	5				\n\
-	.globl	__down_trylock_failed		\n\
-__down_trylock_failed:				\n\
-	stmfd	sp!, {r0 - r4, lr}		\n\
-	mov	r0, ip				\n\
-	bl	__down_trylock			\n\
-	mov	ip, r0				\n\
-	ldmfd	sp!, {r0 - r4, pc}		\n\
-						\n\
-	.align	5				\n\
-	.globl	__up_wakeup			\n\
-__up_wakeup:					\n\
-	stmfd	sp!, {r0 - r4, lr}		\n\
-	mov	r0, ip				\n\
-	bl	__up				\n\
-	ldmfd	sp!, {r0 - r4, pc}		\n\
-	");
-
-EXPORT_SYMBOL(__down_failed);
-EXPORT_SYMBOL(__down_interruptible_failed);
-EXPORT_SYMBOL(__down_trylock_failed);
-EXPORT_SYMBOL(__up_wakeup);
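The "sleepers" bookkeeping in __down() above is easy to misread, so a worked trace may help. Suppose one task is already asleep (count == -1, sleepers == 1) and a second contender arrives; its failed fast-path decrement leaves count == -2 and sleepers becomes 2:

	count = -2, sleepers = 2:  atomic_add_negative(2 - 1, &count) -> count = -1, still negative
	                           so sleepers = 1 and the task schedules
	up() raises count from -1 to 0 and wakes one waiter
	count =  0, sleepers = 1:  atomic_add_negative(1 - 1, &count) -> count = 0, non-negative
	                           so sleepers = 0 and the semaphore is acquired

In effect every pass folds "everybody else" back into count while the spinlock is held, so count never drifts further negative than the current task's pending -1 plus one per sleeper.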
diff --git a/arch/avr32/kernel/Makefile b/arch/avr32/kernel/Makefile
index e4b6d122b033..18229d0d1861 100644
--- a/arch/avr32/kernel/Makefile
+++ b/arch/avr32/kernel/Makefile
@@ -6,7 +6,7 @@ extra-y := head.o vmlinux.lds
 
 obj-$(CONFIG_SUBARCH_AVR32B)	+= entry-avr32b.o
 obj-y				+= syscall_table.o syscall-stubs.o irq.o
-obj-y				+= setup.o traps.o semaphore.o ocd.o ptrace.o
+obj-y				+= setup.o traps.o ocd.o ptrace.o
 obj-y				+= signal.o sys_avr32.o process.o time.o
 obj-y				+= init_task.o switch_to.o cpu.o
 obj-$(CONFIG_MODULES)		+= module.o avr32_ksyms.o
diff --git a/arch/avr32/kernel/semaphore.c b/arch/avr32/kernel/semaphore.c
deleted file mode 100644
index 1e2705a05016..000000000000
--- a/arch/avr32/kernel/semaphore.c
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * AVR32 sempahore implementation.
- *
- * Copyright (C) 2004-2006 Atmel Corporation
- *
- * Based on linux/arch/i386/kernel/semaphore.c
- * Copyright (C) 1999 Linus Torvalds
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-
-#include <asm/semaphore.h>
-#include <asm/atomic.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * "sleeping" and the contention routine ordering is protected
- * by the spinlock in the semaphore's waitqueue head.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- *  - only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - when we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleeper" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-void __up(struct semaphore *sem)
-{
-	wake_up(&sem->wait);
-}
-EXPORT_SYMBOL(__up);
-
-void __sched __down(struct semaphore *sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	unsigned long flags;
-
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock in
-		 * the wait_queue_head.
-		 */
-		if (atomic_add_return(sleepers - 1, &sem->count) >= 0) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-		schedule();
-
-		spin_lock_irqsave(&sem->wait.lock, flags);
-		tsk->state = TASK_UNINTERRUPTIBLE;
-	}
-	remove_wait_queue_locked(&sem->wait, &wait);
-	wake_up_locked(&sem->wait);
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-	tsk->state = TASK_RUNNING;
-}
-EXPORT_SYMBOL(__down);
-
-int __sched __down_interruptible(struct semaphore *sem)
-{
-	int retval = 0;
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	unsigned long flags;
-
-	tsk->state = TASK_INTERRUPTIBLE;
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * With signals pending, this turns into the trylock
-		 * failure case - we won't be sleeping, and we can't
-		 * get the lock as it has contention. Just correct the
-		 * count and exit.
-		 */
-		if (signal_pending(current)) {
-			retval = -EINTR;
-			sem->sleepers = 0;
-			atomic_add(sleepers, &sem->count);
-			break;
-		}
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock in
-		 * the wait_queue_head.
-		 */
-		if (atomic_add_return(sleepers - 1, &sem->count) >= 0) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-		schedule();
-
-		spin_lock_irqsave(&sem->wait.lock, flags);
-		tsk->state = TASK_INTERRUPTIBLE;
-	}
-	remove_wait_queue_locked(&sem->wait, &wait);
-	wake_up_locked(&sem->wait);
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-	tsk->state = TASK_RUNNING;
-	return retval;
-}
-EXPORT_SYMBOL(__down_interruptible);
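None of this churn is visible to callers: down(), down_interruptible() and up() keep their semantics under the generic implementation. A hypothetical usage sketch (the mydev_* names are invented for illustration; the semaphore would be set up with sema_init(&mydev_sem, 1)):

	static struct semaphore mydev_sem;	/* hypothetical single-holder lock */

	static int mydev_do_io(void)
	{
		if (down_interruptible(&mydev_sem))
			return -ERESTARTSYS;	/* woken by a signal, not by up() */
		/* ... exclusive access to the hardware ... */
		up(&mydev_sem);
		return 0;
	}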
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 589c6aca4803..2dd1f300a5cf 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -31,10 +31,6 @@ config ZONE_DMA
 	bool
 	default y
 
-config SEMAPHORE_SLEEPERS
-	bool
-	default y
-
 config GENERIC_FIND_NEXT_BIT
 	bool
 	default y
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index 0bfbb269e350..053edff6c0d8 100644
--- a/arch/blackfin/kernel/bfin_ksyms.c
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -42,11 +42,6 @@ EXPORT_SYMBOL(ip_fast_csum);
 
 EXPORT_SYMBOL(kernel_thread);
 
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__down_interruptible);
-
 EXPORT_SYMBOL(is_in_rom);
 EXPORT_SYMBOL(bfin_return_from_exception);
 
diff --git a/arch/cris/kernel/Makefile b/arch/cris/kernel/Makefile
index c8e8ea570989..ee7bcd4d20b2 100644
--- a/arch/cris/kernel/Makefile
+++ b/arch/cris/kernel/Makefile
@@ -5,8 +5,7 @@
 
 extra-y	:= vmlinux.lds
 
-obj-y   := process.o traps.o irq.o ptrace.o setup.o \
-	   time.o sys_cris.o semaphore.o
+obj-y   := process.o traps.o irq.o ptrace.o setup.o time.o sys_cris.o
 
 obj-$(CONFIG_MODULES)	+= crisksyms.o
 obj-$(CONFIG_MODULES)	+= module.o
diff --git a/arch/cris/kernel/crisksyms.c b/arch/cris/kernel/crisksyms.c
index 62f0e752915a..7ac000f6a888 100644
--- a/arch/cris/kernel/crisksyms.c
+++ b/arch/cris/kernel/crisksyms.c
@@ -9,7 +9,6 @@
 #include <linux/string.h>
 #include <linux/tty.h>
 
-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/checksum.h>
@@ -49,12 +48,6 @@ EXPORT_SYMBOL(__negdi2);
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
 
-/* Semaphore functions */
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down_trylock);
-
 /* Userspace access functions */
 EXPORT_SYMBOL(__copy_user_zeroing);
 EXPORT_SYMBOL(__copy_user);
diff --git a/arch/cris/kernel/semaphore.c b/arch/cris/kernel/semaphore.c
deleted file mode 100644
index f137a439041f..000000000000
--- a/arch/cris/kernel/semaphore.c
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Generic semaphore code. Buyer beware. Do your own
- * specific changes in <asm/semaphore-helper.h>
- */
-
-#include <linux/sched.h>
-#include <asm/semaphore-helper.h>
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to sleep, while the "waking" variable is
- * incremented when the "up()" code goes to wake up waiting
- * processes.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * waking_non_zero() (from asm/semaphore.h) must execute
- * atomically.
- *
- * When __up() is called, the count was negative before
- * incrementing it, and we need to wake up somebody.
- *
- * This routine adds one to the count of processes that need to
- * wake up and exit.  ALL waiting processes actually wake up but
- * only the one that gets to the "waking" field first will gate
- * through and acquire the semaphore.  The others will go back
- * to sleep.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-void __up(struct semaphore *sem)
-{
-	wake_one_more(sem);
-	wake_up(&sem->wait);
-}
-
-/*
- * Perform the "down" function.  Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from __down, the return is ignored and the wait loop is
- * not interruptible.  This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from __down_interruptible, the return value gets checked
- * upon return.  If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- *
- */
-
-#define DOWN_VAR				\
-	struct task_struct *tsk = current;	\
-	wait_queue_t wait;			\
-	init_waitqueue_entry(&wait, tsk);
-
-#define DOWN_HEAD(task_state)						\
-									\
-									\
-	tsk->state = (task_state);					\
-	add_wait_queue(&sem->wait, &wait);				\
-									\
-	/*								\
-	 * Ok, we're set up.  sem->count is known to be less than zero	\
-	 * so we must wait.						\
-	 *								\
-	 * We can let go the lock for purposes of waiting.		\
-	 * We re-acquire it after awaking so as to protect		\
-	 * all semaphore operations.					\
-	 *								\
-	 * If "up()" is called before we call waking_non_zero() then	\
-	 * we will catch it right away.  If it is called later then	\
-	 * we will have to go through a wakeup cycle to catch it.	\
-	 *								\
-	 * Multiple waiters contend for the semaphore lock to see	\
-	 * who gets to gate through and who has to wait some more.	\
-	 */								\
-	for (;;) {
-
-#define DOWN_TAIL(task_state)			\
-		tsk->state = (task_state);	\
-	}					\
-	tsk->state = TASK_RUNNING;		\
-	remove_wait_queue(&sem->wait, &wait);
-
-void __sched __down(struct semaphore * sem)
-{
-	DOWN_VAR
-	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
-	if (waking_non_zero(sem))
-		break;
-	schedule();
-	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	int ret = 0;
-	DOWN_VAR
-	DOWN_HEAD(TASK_INTERRUPTIBLE)
-
-	ret = waking_non_zero_interruptible(sem, tsk);
-	if (ret)
-	{
-		if (ret == 1)
-			/* ret != 0 only if we get interrupted -arca */
-			ret = 0;
-		break;
-	}
-	schedule();
-	DOWN_TAIL(TASK_INTERRUPTIBLE)
-	return ret;
-}
-
-int __down_trylock(struct semaphore * sem)
-{
-	return waking_non_zero_trylock(sem);
-}
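The cris (and, below, h8300) variant delegates the tricky part to helpers in <asm/semaphore-helper.h>, which this diff does not touch: __up() banks a wakeup in a "waking" counter and woken tasks race to consume it. A hedged sketch of the assumed contract, in the spinlock-guarded style the non-RMW h8300 build uses (note its semaphore_wake_lock definition further down); the real helpers may use atomic ops instead:

	/* Assumed semantics of waking_non_zero(): consume one banked wakeup
	 * token.  Returns 1 if the caller now owns the semaphore, else 0. */
	static inline int waking_non_zero(struct semaphore *sem)
	{
		unsigned long flags;
		int ret = 0;

		spin_lock_irqsave(&semaphore_wake_lock, flags);
		if (sem->waking > 0) {		/* __up() granted a token */
			sem->waking--;		/* take it; semaphore is ours */
			ret = 1;
		}
		spin_unlock_irqrestore(&semaphore_wake_lock, flags);
		return ret;
	}

This is why, as the comment block above says, ALL waiters wake up but only the first to reach the "waking" field gates through; the rest go back to sleep.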
diff --git a/arch/frv/kernel/Makefile b/arch/frv/kernel/Makefile
index e8f73ed28b52..c36f70b6699a 100644
--- a/arch/frv/kernel/Makefile
+++ b/arch/frv/kernel/Makefile
@@ -9,7 +9,7 @@ extra-y:= head.o init_task.o vmlinux.lds
 
 obj-y := $(heads-y) entry.o entry-table.o break.o switch_to.o kernel_thread.o \
	 kernel_execve.o process.o traps.o ptrace.o signal.o dma.o \
-	 sys_frv.o time.o semaphore.o setup.o frv_ksyms.o \
+	 sys_frv.o time.o setup.o frv_ksyms.o \
	 debug-stub.o irq.o sleep.o uaccess.o
 
 obj-$(CONFIG_GDBSTUB) += gdb-stub.o gdb-io.o
diff --git a/arch/frv/kernel/frv_ksyms.c b/arch/frv/kernel/frv_ksyms.c
index f772704b3d28..0316b3c50eff 100644
--- a/arch/frv/kernel/frv_ksyms.c
+++ b/arch/frv/kernel/frv_ksyms.c
@@ -12,7 +12,6 @@
 #include <asm/pgalloc.h>
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <asm/semaphore.h>
 #include <asm/checksum.h>
 #include <asm/hardirq.h>
 #include <asm/cacheflush.h>
diff --git a/arch/frv/kernel/semaphore.c b/arch/frv/kernel/semaphore.c
deleted file mode 100644
index 7ee3a147b471..000000000000
--- a/arch/frv/kernel/semaphore.c
+++ /dev/null
@@ -1,155 +0,0 @@
-/* semaphore.c: FR-V semaphores
- *
- * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- * - Derived from lib/rwsem-spinlock.c
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/sched.h>
-#include <linux/module.h>
-#include <asm/semaphore.h>
-
-struct sem_waiter {
-	struct list_head	list;
-	struct task_struct	*task;
-};
-
-#ifdef CONFIG_DEBUG_SEMAPHORE
-void semtrace(struct semaphore *sem, const char *str)
-{
-	if (sem->debug)
-		printk("[%d] %s({%d,%d})\n",
-		       current->pid,
-		       str,
-		       sem->counter,
-		       list_empty(&sem->wait_list) ? 0 : 1);
-}
-#else
-#define semtrace(SEM,STR) do { } while(0)
-#endif
-
-/*
- * wait for a token to be granted from a semaphore
- * - entered with lock held and interrupts disabled
- */
-void __down(struct semaphore *sem, unsigned long flags)
-{
-	struct task_struct *tsk = current;
-	struct sem_waiter waiter;
-
-	semtrace(sem, "Entering __down");
-
-	/* set up my own style of waitqueue */
-	waiter.task = tsk;
-	get_task_struct(tsk);
-
-	list_add_tail(&waiter.list, &sem->wait_list);
-
-	/* we don't need to touch the semaphore struct anymore */
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	/* wait to be given the semaphore */
-	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-
-	for (;;) {
-		if (list_empty(&waiter.list))
-			break;
-		schedule();
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-	}
-
-	tsk->state = TASK_RUNNING;
-	semtrace(sem, "Leaving __down");
-}
-
-EXPORT_SYMBOL(__down);
-
-/*
- * interruptibly wait for a token to be granted from a semaphore
- * - entered with lock held and interrupts disabled
- */
-int __down_interruptible(struct semaphore *sem, unsigned long flags)
-{
-	struct task_struct *tsk = current;
-	struct sem_waiter waiter;
-	int ret;
-
-	semtrace(sem,"Entering __down_interruptible");
-
-	/* set up my own style of waitqueue */
-	waiter.task = tsk;
-	get_task_struct(tsk);
-
-	list_add_tail(&waiter.list, &sem->wait_list);
-
-	/* we don't need to touch the semaphore struct anymore */
-	set_task_state(tsk, TASK_INTERRUPTIBLE);
-
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	/* wait to be given the semaphore */
-	ret = 0;
-	for (;;) {
-		if (list_empty(&waiter.list))
-			break;
-		if (unlikely(signal_pending(current)))
-			goto interrupted;
-		schedule();
-		set_task_state(tsk, TASK_INTERRUPTIBLE);
-	}
-
- out:
-	tsk->state = TASK_RUNNING;
-	semtrace(sem, "Leaving __down_interruptible");
-	return ret;
-
- interrupted:
-	spin_lock_irqsave(&sem->wait_lock, flags);
-
-	if (!list_empty(&waiter.list)) {
-		list_del(&waiter.list);
-		ret = -EINTR;
-	}
-
-	spin_unlock_irqrestore(&sem->wait_lock, flags);
-	if (ret == -EINTR)
-		put_task_struct(current);
-	goto out;
-}
-
-EXPORT_SYMBOL(__down_interruptible);
-
-/*
- * release a single token back to a semaphore
- * - entered with lock held and interrupts disabled
- */
-void __up(struct semaphore *sem)
-{
-	struct task_struct *tsk;
-	struct sem_waiter *waiter;
-
-	semtrace(sem,"Entering __up");
-
-	/* grant the token to the process at the front of the queue */
-	waiter = list_entry(sem->wait_list.next, struct sem_waiter, list);
-
-	/* We must be careful not to touch 'waiter' after we set ->task = NULL.
-	 * It is allocated on the waiter's stack and may become invalid at
-	 * any time after that point (due to a wakeup from another source).
-	 */
-	list_del_init(&waiter->list);
-	tsk = waiter->task;
-	mb();
-	waiter->task = NULL;
-	wake_up_process(tsk);
-	put_task_struct(tsk);
-
-	semtrace(sem,"Leaving __up");
-}
-
-EXPORT_SYMBOL(__up);
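Of all the implementations deleted in this commit, the frv one is closest in spirit to the generic replacement: an explicit waiter list and a token handed directly to the task at the head, instead of a shared count that woken tasks re-contend for. A sketch of the equivalent grant path in the generic code (modelled on kernel/semaphore.c, not quoted from this diff):

	/* Direct handoff as in frv's __up() above: unlink the first waiter,
	 * mark its token granted, then wake it.  The waiter rechecks ->up
	 * after schedule(), so a spurious wakeup cannot steal the token. */
	static void __up(struct semaphore *sem)
	{
		struct semaphore_waiter *waiter =
			list_first_entry(&sem->wait_list,
					 struct semaphore_waiter, list);

		list_del(&waiter->list);
		waiter->up = 1;
		wake_up_process(waiter->task);
	}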
diff --git a/arch/h8300/kernel/Makefile b/arch/h8300/kernel/Makefile
index 874f6aefee65..6c248c3c5c3b 100644
--- a/arch/h8300/kernel/Makefile
+++ b/arch/h8300/kernel/Makefile
@@ -5,7 +5,7 @@
 extra-y := vmlinux.lds
 
 obj-y := process.o traps.o ptrace.o irq.o \
-	 sys_h8300.o time.o semaphore.o signal.o \
+	 sys_h8300.o time.o signal.o \
	 setup.o gpio.o init_task.o syscalls.o \
	 entry.o
 
diff --git a/arch/h8300/kernel/h8300_ksyms.c b/arch/h8300/kernel/h8300_ksyms.c
index d1b15267ac81..6866bd9c7fb4 100644
--- a/arch/h8300/kernel/h8300_ksyms.c
+++ b/arch/h8300/kernel/h8300_ksyms.c
@@ -12,7 +12,6 @@
 #include <asm/pgalloc.h>
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <asm/semaphore.h>
 #include <asm/checksum.h>
 #include <asm/current.h>
 #include <asm/gpio.h>
diff --git a/arch/h8300/kernel/semaphore.c b/arch/h8300/kernel/semaphore.c
deleted file mode 100644
index d12cbbfe6ebd..000000000000
--- a/arch/h8300/kernel/semaphore.c
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Generic semaphore code. Buyer beware. Do your own
- * specific changes in <asm/semaphore-helper.h>
- */
-
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <asm/semaphore-helper.h>
-
-#ifndef CONFIG_RMW_INSNS
-spinlock_t semaphore_wake_lock;
-#endif
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to sleep, while the "waking" variable is
- * incremented when the "up()" code goes to wake up waiting
- * processes.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * waking_non_zero() (from asm/semaphore.h) must execute
- * atomically.
- *
- * When __up() is called, the count was negative before
- * incrementing it, and we need to wake up somebody.
- *
- * This routine adds one to the count of processes that need to
- * wake up and exit.  ALL waiting processes actually wake up but
- * only the one that gets to the "waking" field first will gate
- * through and acquire the semaphore.  The others will go back
- * to sleep.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-void __up(struct semaphore *sem)
-{
-	wake_one_more(sem);
-	wake_up(&sem->wait);
-}
-
-/*
- * Perform the "down" function.  Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from __down, the return is ignored and the wait loop is
- * not interruptible.  This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from __down_interruptible, the return value gets checked
- * upon return.  If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- *
- */
-
-
-#define DOWN_HEAD(task_state)						\
-									\
-									\
-	current->state = (task_state);					\
-	add_wait_queue(&sem->wait, &wait);				\
-									\
-	/*								\
-	 * Ok, we're set up.  sem->count is known to be less than zero	\
-	 * so we must wait.						\
-	 *								\
-	 * We can let go the lock for purposes of waiting.		\
-	 * We re-acquire it after awaking so as to protect		\
-	 * all semaphore operations.					\
-	 *								\
-	 * If "up()" is called before we call waking_non_zero() then	\
-	 * we will catch it right away.  If it is called later then	\
-	 * we will have to go through a wakeup cycle to catch it.	\
-	 *								\
-	 * Multiple waiters contend for the semaphore lock to see	\
-	 * who gets to gate through and who has to wait some more.	\
-	 */								\
-	for (;;) {
-
-#define DOWN_TAIL(task_state)			\
-		current->state = (task_state);	\
-	}					\
-	current->state = TASK_RUNNING;		\
-	remove_wait_queue(&sem->wait, &wait);
-
-void __sched __down(struct semaphore * sem)
-{
-	DECLARE_WAITQUEUE(wait, current);
-
-	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
-	if (waking_non_zero(sem))
-		break;
-	schedule();
-	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	DECLARE_WAITQUEUE(wait, current);
-	int ret = 0;
-
-	DOWN_HEAD(TASK_INTERRUPTIBLE)
-
-	ret = waking_non_zero_interruptible(sem, current);
-	if (ret)
-	{
-		if (ret == 1)
-			/* ret != 0 only if we get interrupted -arca */
-			ret = 0;
-		break;
-	}
-	schedule();
-	DOWN_TAIL(TASK_INTERRUPTIBLE)
-	return ret;
-}
-
-int __down_trylock(struct semaphore * sem)
-{
-	return waking_non_zero_trylock(sem);
-}
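One convention worth calling out, since every trylock path above relies on it: down_trylock() returns 0 on success and nonzero on failure, the inverse of most try-style APIs (compare the alpha debug printk earlier, which maps a nonzero ret to "failed"). An illustrative caller:

	if (down_trylock(&sem))
		return -EBUSY;		/* contended; this path must not sleep */
	/* ... semaphore held ... */
	up(&sem);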
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 33e5a598672d..13fd10e8699e 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -6,7 +6,7 @@ extra-y := head.o init_task.o vmlinux.lds
 
 obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \
	 irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \
-	 salinfo.o semaphore.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
+	 salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
	 unwind.o mca.o mca_asm.o topology.o
 
 obj-$(CONFIG_IA64_BRL_EMU)	+= brl_emu.o
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 8e7193d55528..6da1f20d7372 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -19,12 +19,6 @@ EXPORT_SYMBOL_GPL(empty_zero_page);
 EXPORT_SYMBOL(ip_fast_csum);		/* hand-coded assembly */
 EXPORT_SYMBOL(csum_ipv6_magic);
 
-#include <asm/semaphore.h>
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__up);
-
 #include <asm/page.h>
 EXPORT_SYMBOL(clear_page);
 
diff --git a/arch/ia64/kernel/semaphore.c b/arch/ia64/kernel/semaphore.c
deleted file mode 100644
index 2724ef3fbae2..000000000000
--- a/arch/ia64/kernel/semaphore.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * IA-64 semaphore implementation (derived from x86 version).
- *
- * Copyright (C) 1999-2000, 2002 Hewlett-Packard Co
- *	David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-/*
- * Semaphores are implemented using a two-way counter: The "count"
- * variable is decremented for each process that tries to acquire the
- * semaphore, while the "sleepers" variable is a count of such
- * acquires.
- *
- * Notably, the inline "up()" and "down()" functions can efficiently
- * test if they need to do any extra work (up needs to do something
- * only if count was negative before the increment operation.
- *
- * "sleeping" and the contention routine ordering is protected
- * by the spinlock in the semaphore's waitqueue head.
- *
- * Note that these functions are only called when there is contention
- * on the lock, and as such all this is the "non-critical" part of the
- * whole semaphore business. The critical part is the inline stuff in
- * <asm/semaphore.h> where we want to avoid any extra jumps and calls.
- */
-#include <linux/sched.h>
-#include <linux/init.h>
-
-#include <asm/errno.h>
-#include <asm/semaphore.h>
-
-/*
- * Logic:
- *  - Only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - When we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleepers" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-void
-__up (struct semaphore *sem)
-{
-	wake_up(&sem->wait);
-}
-
-void __sched __down (struct semaphore *sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	unsigned long flags;
-
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock in
-		 * the wait_queue_head.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-		schedule();
-
-		spin_lock_irqsave(&sem->wait.lock, flags);
-		tsk->state = TASK_UNINTERRUPTIBLE;
-	}
-	remove_wait_queue_locked(&sem->wait, &wait);
-	wake_up_locked(&sem->wait);
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-	tsk->state = TASK_RUNNING;
-}
-
-int __sched __down_interruptible (struct semaphore * sem)
-{
-	int retval = 0;
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	unsigned long flags;
-
-	tsk->state = TASK_INTERRUPTIBLE;
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-	sem->sleepers ++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * With signals pending, this turns into
-		 * the trylock failure case - we won't be
-		 * sleeping, and we* can't get the lock as
-		 * it has contention. Just correct the count
-		 * and exit.
-		 */
-		if (signal_pending(current)) {
-			retval = -EINTR;
-			sem->sleepers = 0;
-			atomic_add(sleepers, &sem->count);
-			break;
-		}
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock in
-		 * wait_queue_head. The "-1" is because we're
-		 * still hoping to get the semaphore.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-		schedule();
-
-		spin_lock_irqsave(&sem->wait.lock, flags);
-		tsk->state = TASK_INTERRUPTIBLE;
-	}
-	remove_wait_queue_locked(&sem->wait, &wait);
-	wake_up_locked(&sem->wait);
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-	tsk->state = TASK_RUNNING;
-	return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for having decremented the
- * count.
- */
-int
-__down_trylock (struct semaphore *sem)
-{
-	unsigned long flags;
-	int sleepers;
-
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	sleepers = sem->sleepers + 1;
-	sem->sleepers = 0;
-
-	/*
-	 * Add "everybody else" and us into it. They aren't
-	 * playing, because we own the spinlock in the
-	 * wait_queue_head.
-	 */
-	if (!atomic_add_negative(sleepers, &sem->count)) {
-		wake_up_locked(&sem->wait);
-	}
-
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-	return 1;
-}
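A worked example of the correction in ia64's __down_trylock() above: the inline fast path has already decremented count before this slow path runs. Starting from a held, uncontended semaphore (count == 0, sleepers == 0), a failing trylock leaves count == -1; the slow path then adds sleepers + 1 == 1, restoring count to 0, zeroes sleepers, and returns 1 (failure) without sleeping. The wake_up_locked() call covers the case where folding the sleepers back in has just made count non-negative, so an existing sleeper may now be able to take the semaphore.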
diff --git a/arch/m32r/kernel/Makefile b/arch/m32r/kernel/Makefile
index e97e26e87c9e..09200d4886e3 100644
--- a/arch/m32r/kernel/Makefile
+++ b/arch/m32r/kernel/Makefile
@@ -5,7 +5,7 @@
 extra-y := head.o init_task.o vmlinux.lds
 
 obj-y := process.o entry.o traps.o align.o irq.o setup.o time.o \
-	m32r_ksyms.o sys_m32r.o semaphore.o signal.o ptrace.o
+	m32r_ksyms.o sys_m32r.o signal.o ptrace.o
 
 obj-$(CONFIG_SMP)		+= smp.o smpboot.o
 obj-$(CONFIG_MODULES)		+= module.o
diff --git a/arch/m32r/kernel/m32r_ksyms.c b/arch/m32r/kernel/m32r_ksyms.c
index 41a4c95e06d6..e6709fe950ba 100644
--- a/arch/m32r/kernel/m32r_ksyms.c
+++ b/arch/m32r/kernel/m32r_ksyms.c
@@ -7,7 +7,6 @@
 #include <linux/interrupt.h>
 #include <linux/string.h>
 
-#include <asm/semaphore.h>
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/checksum.h>
@@ -22,10 +21,6 @@ EXPORT_SYMBOL(dump_fpu);
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
 EXPORT_SYMBOL(kernel_thread);
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__up);
-EXPORT_SYMBOL(__down_trylock);
 
 /* Networking helper routines. */
 /* Delay loops */
diff --git a/arch/m32r/kernel/semaphore.c b/arch/m32r/kernel/semaphore.c deleted file mode 100644 index 940c2d37cfd1..000000000000 --- a/arch/m32r/kernel/semaphore.c +++ /dev/null | |||
@@ -1,185 +0,0 @@ | |||
1 | /* | ||
2 | * linux/arch/m32r/semaphore.c | ||
3 | * orig : i386 2.6.4 | ||
4 | * | ||
5 | * M32R semaphore implementation. | ||
6 | * | ||
7 | * Copyright (c) 2002 - 2004 Hitoshi Yamamoto | ||
8 | */ | ||
9 | |||
10 | /* | ||
11 | * i386 semaphore implementation. | ||
12 | * | ||
13 | * (C) Copyright 1999 Linus Torvalds | ||
14 | * | ||
15 | * Portions Copyright 1999 Red Hat, Inc. | ||
16 | * | ||
17 | * This program is free software; you can redistribute it and/or | ||
18 | * modify it under the terms of the GNU General Public License | ||
19 | * as published by the Free Software Foundation; either version | ||
20 | * 2 of the License, or (at your option) any later version. | ||
21 | * | ||
22 | * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org> | ||
23 | */ | ||
24 | #include <linux/sched.h> | ||
25 | #include <linux/err.h> | ||
26 | #include <linux/init.h> | ||
27 | #include <asm/semaphore.h> | ||
28 | |||
29 | /* | ||
30 | * Semaphores are implemented using a two-way counter: | ||
31 | * The "count" variable is decremented for each process | ||
32 | * that tries to acquire the semaphore, while the "sleeping" | ||
33 | * variable is a count of such acquires. | ||
34 | * | ||
35 | * Notably, the inline "up()" and "down()" functions can | ||
36 | * efficiently test if they need to do any extra work (up | ||
37 | * needs to do something only if count was negative before | ||
38 | * the increment operation). | ||
39 | * | ||
40 | * "sleeping" and the contention routine ordering is protected | ||
41 | * by the spinlock in the semaphore's waitqueue head. | ||
42 | * | ||
43 | * Note that these functions are only called when there is | ||
44 | * contention on the lock, and as such all this is the | ||
45 | * "non-critical" part of the whole semaphore business. The | ||
46 | * critical part is the inline stuff in <asm/semaphore.h> | ||
47 | * where we want to avoid any extra jumps and calls. | ||
48 | */ | ||
49 | |||
50 | /* | ||
51 | * Logic: | ||
52 | * - only on a boundary condition do we need to care. When we go | ||
53 | * from a negative count to a non-negative, we wake people up. | ||
54 | * - when we go from a non-negative count to a negative do we | ||
55 | * (a) synchronize with the "sleeper" count and (b) make sure | ||
56 | * that we're on the wakeup list before we synchronize so that | ||
57 | * we cannot lose wakeup events. | ||
58 | */ | ||
59 | |||
60 | asmlinkage void __up(struct semaphore *sem) | ||
61 | { | ||
62 | wake_up(&sem->wait); | ||
63 | } | ||
64 | |||
65 | asmlinkage void __sched __down(struct semaphore * sem) | ||
66 | { | ||
67 | struct task_struct *tsk = current; | ||
68 | DECLARE_WAITQUEUE(wait, tsk); | ||
69 | unsigned long flags; | ||
70 | |||
71 | tsk->state = TASK_UNINTERRUPTIBLE; | ||
72 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
73 | add_wait_queue_exclusive_locked(&sem->wait, &wait); | ||
74 | |||
75 | sem->sleepers++; | ||
76 | for (;;) { | ||
77 | int sleepers = sem->sleepers; | ||
78 | |||
79 | /* | ||
80 | * Add "everybody else" into it. They aren't | ||
81 | * playing, because we own the spinlock in | ||
82 | * the wait_queue_head. | ||
83 | */ | ||
84 | if (!atomic_add_negative(sleepers - 1, &sem->count)) { | ||
85 | sem->sleepers = 0; | ||
86 | break; | ||
87 | } | ||
88 | sem->sleepers = 1; /* us - see -1 above */ | ||
89 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
90 | |||
91 | schedule(); | ||
92 | |||
93 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
94 | tsk->state = TASK_UNINTERRUPTIBLE; | ||
95 | } | ||
96 | remove_wait_queue_locked(&sem->wait, &wait); | ||
97 | wake_up_locked(&sem->wait); | ||
98 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
99 | tsk->state = TASK_RUNNING; | ||
100 | } | ||
101 | |||
102 | asmlinkage int __sched __down_interruptible(struct semaphore * sem) | ||
103 | { | ||
104 | int retval = 0; | ||
105 | struct task_struct *tsk = current; | ||
106 | DECLARE_WAITQUEUE(wait, tsk); | ||
107 | unsigned long flags; | ||
108 | |||
109 | tsk->state = TASK_INTERRUPTIBLE; | ||
110 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
111 | add_wait_queue_exclusive_locked(&sem->wait, &wait); | ||
112 | |||
113 | sem->sleepers++; | ||
114 | for (;;) { | ||
115 | int sleepers = sem->sleepers; | ||
116 | |||
117 | /* | ||
118 | * With signals pending, this turns into | ||
119 | * the trylock failure case - we won't be | ||
120 | * sleeping, and we can't get the lock as | ||
121 | * it has contention. Just correct the count | ||
122 | * and exit. | ||
123 | */ | ||
124 | if (signal_pending(current)) { | ||
125 | retval = -EINTR; | ||
126 | sem->sleepers = 0; | ||
127 | atomic_add(sleepers, &sem->count); | ||
128 | break; | ||
129 | } | ||
130 | |||
131 | /* | ||
132 | * Add "everybody else" into it. They aren't | ||
133 | * playing, because we own the spinlock in | ||
134 | * wait_queue_head. The "-1" is because we're | ||
135 | * still hoping to get the semaphore. | ||
136 | */ | ||
137 | if (!atomic_add_negative(sleepers - 1, &sem->count)) { | ||
138 | sem->sleepers = 0; | ||
139 | break; | ||
140 | } | ||
141 | sem->sleepers = 1; /* us - see -1 above */ | ||
142 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
143 | |||
144 | schedule(); | ||
145 | |||
146 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
147 | tsk->state = TASK_INTERRUPTIBLE; | ||
148 | } | ||
149 | remove_wait_queue_locked(&sem->wait, &wait); | ||
150 | wake_up_locked(&sem->wait); | ||
151 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
152 | |||
153 | tsk->state = TASK_RUNNING; | ||
154 | return retval; | ||
155 | } | ||
156 | |||
157 | /* | ||
158 | * Trylock failed - make sure we correct for | ||
159 | * having decremented the count. | ||
160 | * | ||
161 | * We could have done the trylock with a | ||
162 | * single "cmpxchg" without failure cases, | ||
163 | * but then it wouldn't work on a 386. | ||
164 | */ | ||
165 | asmlinkage int __down_trylock(struct semaphore * sem) | ||
166 | { | ||
167 | int sleepers; | ||
168 | unsigned long flags; | ||
169 | |||
170 | spin_lock_irqsave(&sem->wait.lock, flags); | ||
171 | sleepers = sem->sleepers + 1; | ||
172 | sem->sleepers = 0; | ||
173 | |||
174 | /* | ||
175 | * Add "everybody else" and us into it. They aren't | ||
176 | * playing, because we own the spinlock in the | ||
177 | * wait_queue_head. | ||
178 | */ | ||
179 | if (!atomic_add_negative(sleepers, &sem->count)) { | ||
180 | wake_up_locked(&sem->wait); | ||
181 | } | ||
182 | |||
183 | spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
184 | return 1; | ||
185 | } | ||
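
The count/sleepers scheme deleted above leans entirely on atomic_add_negative(): add a value to the count and report whether the result went negative. A hedged, user-space C11 rendering (the kernel primitive operates on atomic_t; names here are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Illustrative stand-in for the kernel's atomic_add_negative():
     * add i to *v and report whether the result dropped below zero,
     * the one test the contended wait loops above rely on. */
    static bool add_negative_sketch(int i, atomic_int *v)
    {
            /* atomic_fetch_add returns the old value, so re-add i */
            return atomic_fetch_add(v, i) + i < 0;
    }

Folding the other sleepers in via atomic_add_negative(sleepers - 1, &sem->count) is what lets a woken task decide, under the waitqueue lock, whether it may take the semaphore or must sleep again.
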
diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile index a806208c7fb5..7a62a718143b 100644 --- a/arch/m68k/kernel/Makefile +++ b/arch/m68k/kernel/Makefile | |||
@@ -10,7 +10,7 @@ endif | |||
10 | extra-y += vmlinux.lds | 10 | extra-y += vmlinux.lds |
11 | 11 | ||
12 | obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o module.o \ | 12 | obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o module.o \ |
13 | sys_m68k.o time.o semaphore.o setup.o m68k_ksyms.o devres.o | 13 | sys_m68k.o time.o setup.o m68k_ksyms.o devres.o |
14 | 14 | ||
15 | devres-y = ../../../kernel/irq/devres.o | 15 | devres-y = ../../../kernel/irq/devres.o |
16 | 16 | ||
diff --git a/arch/m68k/kernel/m68k_ksyms.c b/arch/m68k/kernel/m68k_ksyms.c index 6fc69c74fe2e..d900e77e5363 100644 --- a/arch/m68k/kernel/m68k_ksyms.c +++ b/arch/m68k/kernel/m68k_ksyms.c | |||
@@ -1,5 +1,4 @@ | |||
1 | #include <linux/module.h> | 1 | #include <linux/module.h> |
2 | #include <asm/semaphore.h> | ||
3 | 2 | ||
4 | asmlinkage long long __ashldi3 (long long, int); | 3 | asmlinkage long long __ashldi3 (long long, int); |
5 | asmlinkage long long __ashrdi3 (long long, int); | 4 | asmlinkage long long __ashrdi3 (long long, int); |
@@ -15,8 +14,3 @@ EXPORT_SYMBOL(__ashrdi3); | |||
15 | EXPORT_SYMBOL(__lshrdi3); | 14 | EXPORT_SYMBOL(__lshrdi3); |
16 | EXPORT_SYMBOL(__muldi3); | 15 | EXPORT_SYMBOL(__muldi3); |
17 | 16 | ||
18 | EXPORT_SYMBOL(__down_failed); | ||
19 | EXPORT_SYMBOL(__down_failed_interruptible); | ||
20 | EXPORT_SYMBOL(__down_failed_trylock); | ||
21 | EXPORT_SYMBOL(__up_wakeup); | ||
22 | |||
diff --git a/arch/m68k/kernel/semaphore.c b/arch/m68k/kernel/semaphore.c deleted file mode 100644 index d12cbbfe6ebd..000000000000 --- a/arch/m68k/kernel/semaphore.c +++ /dev/null | |||
@@ -1,132 +0,0 @@ | |||
1 | /* | ||
2 | * Generic semaphore code. Buyer beware. Do your own | ||
3 | * specific changes in <asm/semaphore-helper.h> | ||
4 | */ | ||
5 | |||
6 | #include <linux/sched.h> | ||
7 | #include <linux/init.h> | ||
8 | #include <asm/semaphore-helper.h> | ||
9 | |||
10 | #ifndef CONFIG_RMW_INSNS | ||
11 | spinlock_t semaphore_wake_lock; | ||
12 | #endif | ||
13 | |||
14 | /* | ||
15 | * Semaphores are implemented using a two-way counter: | ||
16 | * The "count" variable is decremented for each process | ||
17 | * that tries to sleep, while the "waking" variable is | ||
18 | * incremented when the "up()" code goes to wake up waiting | ||
19 | * processes. | ||
20 | * | ||
21 | * Notably, the inline "up()" and "down()" functions can | ||
22 | * efficiently test if they need to do any extra work (up | ||
23 | * needs to do something only if count was negative before | ||
24 | * the increment operation). | ||
25 | * | ||
26 | * waking_non_zero() (from asm/semaphore.h) must execute | ||
27 | * atomically. | ||
28 | * | ||
29 | * When __up() is called, the count was negative before | ||
30 | * incrementing it, and we need to wake up somebody. | ||
31 | * | ||
32 | * This routine adds one to the count of processes that need to | ||
33 | * wake up and exit. ALL waiting processes actually wake up but | ||
34 | * only the one that gets to the "waking" field first will gate | ||
35 | * through and acquire the semaphore. The others will go back | ||
36 | * to sleep. | ||
37 | * | ||
38 | * Note that these functions are only called when there is | ||
39 | * contention on the lock, and as such all this is the | ||
40 | * "non-critical" part of the whole semaphore business. The | ||
41 | * critical part is the inline stuff in <asm/semaphore.h> | ||
42 | * where we want to avoid any extra jumps and calls. | ||
43 | */ | ||
44 | void __up(struct semaphore *sem) | ||
45 | { | ||
46 | wake_one_more(sem); | ||
47 | wake_up(&sem->wait); | ||
48 | } | ||
49 | |||
50 | /* | ||
51 | * Perform the "down" function. Return zero for semaphore acquired, | ||
52 | * return negative for signalled out of the function. | ||
53 | * | ||
54 | * If called from __down, the return is ignored and the wait loop is | ||
55 | * not interruptible. This means that a task waiting on a semaphore | ||
56 | * using "down()" cannot be killed until someone does an "up()" on | ||
57 | * the semaphore. | ||
58 | * | ||
59 | * If called from __down_interruptible, the return value gets checked | ||
60 | * upon return. If the return value is negative then the task continues | ||
61 | * with the negative value in the return register (it can be tested by | ||
62 | * the caller). | ||
63 | * | ||
64 | * Either form may be used in conjunction with "up()". | ||
65 | * | ||
66 | */ | ||
67 | |||
68 | |||
69 | #define DOWN_HEAD(task_state) \ | ||
70 | \ | ||
71 | \ | ||
72 | current->state = (task_state); \ | ||
73 | add_wait_queue(&sem->wait, &wait); \ | ||
74 | \ | ||
75 | /* \ | ||
76 | * Ok, we're set up. sem->count is known to be less than zero \ | ||
77 | * so we must wait. \ | ||
78 | * \ | ||
79 | * We can let go the lock for purposes of waiting. \ | ||
80 | * We re-acquire it after awaking so as to protect \ | ||
81 | * all semaphore operations. \ | ||
82 | * \ | ||
83 | * If "up()" is called before we call waking_non_zero() then \ | ||
84 | * we will catch it right away. If it is called later then \ | ||
85 | * we will have to go through a wakeup cycle to catch it. \ | ||
86 | * \ | ||
87 | * Multiple waiters contend for the semaphore lock to see \ | ||
88 | * who gets to gate through and who has to wait some more. \ | ||
89 | */ \ | ||
90 | for (;;) { | ||
91 | |||
92 | #define DOWN_TAIL(task_state) \ | ||
93 | current->state = (task_state); \ | ||
94 | } \ | ||
95 | current->state = TASK_RUNNING; \ | ||
96 | remove_wait_queue(&sem->wait, &wait); | ||
97 | |||
98 | void __sched __down(struct semaphore * sem) | ||
99 | { | ||
100 | DECLARE_WAITQUEUE(wait, current); | ||
101 | |||
102 | DOWN_HEAD(TASK_UNINTERRUPTIBLE) | ||
103 | if (waking_non_zero(sem)) | ||
104 | break; | ||
105 | schedule(); | ||
106 | DOWN_TAIL(TASK_UNINTERRUPTIBLE) | ||
107 | } | ||
108 | |||
109 | int __sched __down_interruptible(struct semaphore * sem) | ||
110 | { | ||
111 | DECLARE_WAITQUEUE(wait, current); | ||
112 | int ret = 0; | ||
113 | |||
114 | DOWN_HEAD(TASK_INTERRUPTIBLE) | ||
115 | |||
116 | ret = waking_non_zero_interruptible(sem, current); | ||
117 | if (ret) | ||
118 | { | ||
119 | if (ret == 1) | ||
120 | /* ret != 0 only if we get interrupted -arca */ | ||
121 | ret = 0; | ||
122 | break; | ||
123 | } | ||
124 | schedule(); | ||
125 | DOWN_TAIL(TASK_INTERRUPTIBLE) | ||
126 | return ret; | ||
127 | } | ||
128 | |||
129 | int __down_trylock(struct semaphore * sem) | ||
130 | { | ||
131 | return waking_non_zero_trylock(sem); | ||
132 | } | ||
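
This file defers the actual gating to waking_non_zero() and friends in <asm/semaphore-helper.h>. As a rough sketch of the non-RMW variant guarded by semaphore_wake_lock above (field names and locking are recalled from headers of the era, so treat the exact form as an assumption):

    /* Hedged sketch: consume one banked wakeup if any is available;
     * only the caller that wins the race gates through. */
    static int waking_non_zero_sketch(struct semaphore *sem)
    {
            unsigned long flags;
            int ret = 0;

            spin_lock_irqsave(&semaphore_wake_lock, flags);
            if (sem->waking > 0) {   /* assumed: int guarded by the lock */
                    sem->waking--;
                    ret = 1;
            }
            spin_unlock_irqrestore(&semaphore_wake_lock, flags);
            return ret;
    }
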
diff --git a/arch/m68k/lib/Makefile b/arch/m68k/lib/Makefile index 6bbf19f96007..a18af095cd7c 100644 --- a/arch/m68k/lib/Makefile +++ b/arch/m68k/lib/Makefile | |||
@@ -5,4 +5,4 @@ | |||
5 | EXTRA_AFLAGS := -traditional | 5 | EXTRA_AFLAGS := -traditional |
6 | 6 | ||
7 | lib-y := ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \ | 7 | lib-y := ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \ |
8 | checksum.o string.o semaphore.o uaccess.o | 8 | checksum.o string.o uaccess.o |
diff --git a/arch/m68k/lib/semaphore.S b/arch/m68k/lib/semaphore.S deleted file mode 100644 index 0215624c1602..000000000000 --- a/arch/m68k/lib/semaphore.S +++ /dev/null | |||
@@ -1,53 +0,0 @@ | |||
1 | /* | ||
2 | * linux/arch/m68k/lib/semaphore.S | ||
3 | * | ||
4 | * Copyright (C) 1996 Linus Torvalds | ||
5 | * | ||
6 | * m68k version by Andreas Schwab | ||
7 | */ | ||
8 | |||
9 | #include <linux/linkage.h> | ||
10 | #include <asm/semaphore.h> | ||
11 | |||
12 | /* | ||
13 | * The semaphore operations have a special calling sequence that | ||
14 | * allow us to do a simpler in-line version of them. These routines | ||
15 | * need to convert that sequence back into the C sequence when | ||
16 | * there is contention on the semaphore. | ||
17 | */ | ||
18 | ENTRY(__down_failed) | ||
19 | moveml %a0/%d0/%d1,-(%sp) | ||
20 | movel %a1,-(%sp) | ||
21 | jbsr __down | ||
22 | movel (%sp)+,%a1 | ||
23 | moveml (%sp)+,%a0/%d0/%d1 | ||
24 | rts | ||
25 | |||
26 | ENTRY(__down_failed_interruptible) | ||
27 | movel %a0,-(%sp) | ||
28 | movel %d1,-(%sp) | ||
29 | movel %a1,-(%sp) | ||
30 | jbsr __down_interruptible | ||
31 | movel (%sp)+,%a1 | ||
32 | movel (%sp)+,%d1 | ||
33 | movel (%sp)+,%a0 | ||
34 | rts | ||
35 | |||
36 | ENTRY(__down_failed_trylock) | ||
37 | movel %a0,-(%sp) | ||
38 | movel %d1,-(%sp) | ||
39 | movel %a1,-(%sp) | ||
40 | jbsr __down_trylock | ||
41 | movel (%sp)+,%a1 | ||
42 | movel (%sp)+,%d1 | ||
43 | movel (%sp)+,%a0 | ||
44 | rts | ||
45 | |||
46 | ENTRY(__up_wakeup) | ||
47 | moveml %a0/%d0/%d1,-(%sp) | ||
48 | movel %a1,-(%sp) | ||
49 | jbsr __up | ||
50 | movel (%sp)+,%a1 | ||
51 | moveml (%sp)+,%a0/%d0/%d1 | ||
52 | rts | ||
53 | |||
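
The "special calling sequence" the header comment refers to exists so the inline fast path clobbers no scratch registers: down() is a single atomic decrement, and only the contended case branches out of line, where the trampoline saves %a0/%d0/%d1 before entering C. A portable, hedged sketch of that split (names hypothetical):

    #include <stdatomic.h>

    struct sem_sketch { atomic_int count; };

    void sem_down_slow(struct sem_sketch *sem);  /* out-of-line C path */

    /* Fast path: one atomic decrement; fall out of line only when the
     * count goes negative, i.e. when the token is already held. */
    static inline void sem_down_sketch(struct sem_sketch *sem)
    {
            if (atomic_fetch_sub(&sem->count, 1) <= 0)
                    sem_down_slow(sem);
    }
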
diff --git a/arch/m68knommu/kernel/Makefile b/arch/m68knommu/kernel/Makefile index 1524b39ad63f..f0eab3dedb5a 100644 --- a/arch/m68knommu/kernel/Makefile +++ b/arch/m68knommu/kernel/Makefile | |||
@@ -5,7 +5,7 @@ | |||
5 | extra-y := vmlinux.lds | 5 | extra-y := vmlinux.lds |
6 | 6 | ||
7 | obj-y += dma.o entry.o init_task.o irq.o m68k_ksyms.o process.o ptrace.o \ | 7 | obj-y += dma.o entry.o init_task.o irq.o m68k_ksyms.o process.o ptrace.o \ |
8 | semaphore.o setup.o signal.o syscalltable.o sys_m68k.o time.o traps.o | 8 | setup.o signal.o syscalltable.o sys_m68k.o time.o traps.o |
9 | 9 | ||
10 | obj-$(CONFIG_MODULES) += module.o | 10 | obj-$(CONFIG_MODULES) += module.o |
11 | obj-$(CONFIG_COMEMPCI) += comempci.o | 11 | obj-$(CONFIG_COMEMPCI) += comempci.o |
diff --git a/arch/m68knommu/kernel/m68k_ksyms.c b/arch/m68knommu/kernel/m68k_ksyms.c index 53fad1490282..39fe0a7aec32 100644 --- a/arch/m68knommu/kernel/m68k_ksyms.c +++ b/arch/m68knommu/kernel/m68k_ksyms.c | |||
@@ -13,7 +13,6 @@ | |||
13 | #include <asm/pgalloc.h> | 13 | #include <asm/pgalloc.h> |
14 | #include <asm/irq.h> | 14 | #include <asm/irq.h> |
15 | #include <asm/io.h> | 15 | #include <asm/io.h> |
16 | #include <asm/semaphore.h> | ||
17 | #include <asm/checksum.h> | 16 | #include <asm/checksum.h> |
18 | #include <asm/current.h> | 17 | #include <asm/current.h> |
19 | 18 | ||
@@ -39,11 +38,6 @@ EXPORT_SYMBOL(csum_partial_copy_nocheck); | |||
39 | EXPORT_SYMBOL(memcpy); | 38 | EXPORT_SYMBOL(memcpy); |
40 | EXPORT_SYMBOL(memset); | 39 | EXPORT_SYMBOL(memset); |
41 | 40 | ||
42 | EXPORT_SYMBOL(__down_failed); | ||
43 | EXPORT_SYMBOL(__down_failed_interruptible); | ||
44 | EXPORT_SYMBOL(__down_failed_trylock); | ||
45 | EXPORT_SYMBOL(__up_wakeup); | ||
46 | |||
47 | /* | 41 | /* |
48 | * libgcc functions - functions that are used internally by the | 42 | * libgcc functions - functions that are used internally by the |
49 | * compiler... (prototypes are not correct though, but that | 43 | * compiler... (prototypes are not correct though, but that |
diff --git a/arch/m68knommu/kernel/semaphore.c b/arch/m68knommu/kernel/semaphore.c deleted file mode 100644 index bce2bc7d87c6..000000000000 --- a/arch/m68knommu/kernel/semaphore.c +++ /dev/null | |||
@@ -1,133 +0,0 @@ | |||
1 | /* | ||
2 | * Generic semaphore code. Buyer beware. Do your own | ||
3 | * specific changes in <asm/semaphore-helper.h> | ||
4 | */ | ||
5 | |||
6 | #include <linux/sched.h> | ||
7 | #include <linux/err.h> | ||
8 | #include <linux/init.h> | ||
9 | #include <asm/semaphore-helper.h> | ||
10 | |||
11 | #ifndef CONFIG_RMW_INSNS | ||
12 | spinlock_t semaphore_wake_lock; | ||
13 | #endif | ||
14 | |||
15 | /* | ||
16 | * Semaphores are implemented using a two-way counter: | ||
17 | * The "count" variable is decremented for each process | ||
18 | * that tries to sleep, while the "waking" variable is | ||
19 | * incremented when the "up()" code goes to wake up waiting | ||
20 | * processes. | ||
21 | * | ||
22 | * Notably, the inline "up()" and "down()" functions can | ||
23 | * efficiently test if they need to do any extra work (up | ||
24 | * needs to do something only if count was negative before | ||
25 | * the increment operation). | ||
26 | * | ||
27 | * waking_non_zero() (from asm/semaphore.h) must execute | ||
28 | * atomically. | ||
29 | * | ||
30 | * When __up() is called, the count was negative before | ||
31 | * incrementing it, and we need to wake up somebody. | ||
32 | * | ||
33 | * This routine adds one to the count of processes that need to | ||
34 | * wake up and exit. ALL waiting processes actually wake up but | ||
35 | * only the one that gets to the "waking" field first will gate | ||
36 | * through and acquire the semaphore. The others will go back | ||
37 | * to sleep. | ||
38 | * | ||
39 | * Note that these functions are only called when there is | ||
40 | * contention on the lock, and as such all this is the | ||
41 | * "non-critical" part of the whole semaphore business. The | ||
42 | * critical part is the inline stuff in <asm/semaphore.h> | ||
43 | * where we want to avoid any extra jumps and calls. | ||
44 | */ | ||
45 | void __up(struct semaphore *sem) | ||
46 | { | ||
47 | wake_one_more(sem); | ||
48 | wake_up(&sem->wait); | ||
49 | } | ||
50 | |||
51 | /* | ||
52 | * Perform the "down" function. Return zero for semaphore acquired, | ||
53 | * return negative for signalled out of the function. | ||
54 | * | ||
55 | * If called from __down, the return is ignored and the wait loop is | ||
56 | * not interruptible. This means that a task waiting on a semaphore | ||
57 | * using "down()" cannot be killed until someone does an "up()" on | ||
58 | * the semaphore. | ||
59 | * | ||
60 | * If called from __down_interruptible, the return value gets checked | ||
61 | * upon return. If the return value is negative then the task continues | ||
62 | * with the negative value in the return register (it can be tested by | ||
63 | * the caller). | ||
64 | * | ||
65 | * Either form may be used in conjunction with "up()". | ||
66 | * | ||
67 | */ | ||
68 | |||
69 | |||
70 | #define DOWN_HEAD(task_state) \ | ||
71 | \ | ||
72 | \ | ||
73 | current->state = (task_state); \ | ||
74 | add_wait_queue(&sem->wait, &wait); \ | ||
75 | \ | ||
76 | /* \ | ||
77 | * Ok, we're set up. sem->count is known to be less than zero \ | ||
78 | * so we must wait. \ | ||
79 | * \ | ||
80 | * We can let go the lock for purposes of waiting. \ | ||
81 | * We re-acquire it after awaking so as to protect \ | ||
82 | * all semaphore operations. \ | ||
83 | * \ | ||
84 | * If "up()" is called before we call waking_non_zero() then \ | ||
85 | * we will catch it right away. If it is called later then \ | ||
86 | * we will have to go through a wakeup cycle to catch it. \ | ||
87 | * \ | ||
88 | * Multiple waiters contend for the semaphore lock to see \ | ||
89 | * who gets to gate through and who has to wait some more. \ | ||
90 | */ \ | ||
91 | for (;;) { | ||
92 | |||
93 | #define DOWN_TAIL(task_state) \ | ||
94 | current->state = (task_state); \ | ||
95 | } \ | ||
96 | current->state = TASK_RUNNING; \ | ||
97 | remove_wait_queue(&sem->wait, &wait); | ||
98 | |||
99 | void __sched __down(struct semaphore * sem) | ||
100 | { | ||
101 | DECLARE_WAITQUEUE(wait, current); | ||
102 | |||
103 | DOWN_HEAD(TASK_UNINTERRUPTIBLE) | ||
104 | if (waking_non_zero(sem)) | ||
105 | break; | ||
106 | schedule(); | ||
107 | DOWN_TAIL(TASK_UNINTERRUPTIBLE) | ||
108 | } | ||
109 | |||
110 | int __sched __down_interruptible(struct semaphore * sem) | ||
111 | { | ||
112 | DECLARE_WAITQUEUE(wait, current); | ||
113 | int ret = 0; | ||
114 | |||
115 | DOWN_HEAD(TASK_INTERRUPTIBLE) | ||
116 | |||
117 | ret = waking_non_zero_interruptible(sem, current); | ||
118 | if (ret) | ||
119 | { | ||
120 | if (ret == 1) | ||
121 | /* ret != 0 only if we get interrupted -arca */ | ||
122 | ret = 0; | ||
123 | break; | ||
124 | } | ||
125 | schedule(); | ||
126 | DOWN_TAIL(TASK_INTERRUPTIBLE) | ||
127 | return ret; | ||
128 | } | ||
129 | |||
130 | int __down_trylock(struct semaphore * sem) | ||
131 | { | ||
132 | return waking_non_zero_trylock(sem); | ||
133 | } | ||
diff --git a/arch/m68knommu/lib/Makefile b/arch/m68knommu/lib/Makefile index e051a7913987..d94d709665aa 100644 --- a/arch/m68knommu/lib/Makefile +++ b/arch/m68knommu/lib/Makefile | |||
@@ -4,4 +4,4 @@ | |||
4 | 4 | ||
5 | lib-y := ashldi3.o ashrdi3.o lshrdi3.o \ | 5 | lib-y := ashldi3.o ashrdi3.o lshrdi3.o \ |
6 | muldi3.o mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o \ | 6 | muldi3.o mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o \ |
7 | checksum.o semaphore.o memcpy.o memset.o delay.o | 7 | checksum.o memcpy.o memset.o delay.o |
diff --git a/arch/m68knommu/lib/semaphore.S b/arch/m68knommu/lib/semaphore.S deleted file mode 100644 index 87c746034376..000000000000 --- a/arch/m68knommu/lib/semaphore.S +++ /dev/null | |||
@@ -1,66 +0,0 @@ | |||
1 | /* | ||
2 | * linux/arch/m68k/lib/semaphore.S | ||
3 | * | ||
4 | * Copyright (C) 1996 Linus Torvalds | ||
5 | * | ||
6 | * m68k version by Andreas Schwab | ||
7 | * | ||
8 | * MAR/1999 -- modified to support ColdFire (gerg@snapgear.com) | ||
9 | */ | ||
10 | |||
11 | #include <linux/linkage.h> | ||
12 | #include <asm/semaphore.h> | ||
13 | |||
14 | /* | ||
15 | * "down_failed" is called with the eventual return address | ||
16 | * in %a0, and the address of the semaphore in %a1. We need | ||
17 | * to increment the number of waiters on the semaphore, | ||
18 | * call "__down()", and then eventually return to try again. | ||
19 | */ | ||
20 | ENTRY(__down_failed) | ||
21 | #ifdef CONFIG_COLDFIRE | ||
22 | subl #12,%sp | ||
23 | moveml %a0/%d0/%d1,(%sp) | ||
24 | #else | ||
25 | moveml %a0/%d0/%d1,-(%sp) | ||
26 | #endif | ||
27 | movel %a1,-(%sp) | ||
28 | jbsr __down | ||
29 | movel (%sp)+,%a1 | ||
30 | movel (%sp)+,%d0 | ||
31 | movel (%sp)+,%d1 | ||
32 | rts | ||
33 | |||
34 | ENTRY(__down_failed_interruptible) | ||
35 | movel %a0,-(%sp) | ||
36 | movel %d1,-(%sp) | ||
37 | movel %a1,-(%sp) | ||
38 | jbsr __down_interruptible | ||
39 | movel (%sp)+,%a1 | ||
40 | movel (%sp)+,%d1 | ||
41 | rts | ||
42 | |||
43 | ENTRY(__up_wakeup) | ||
44 | #ifdef CONFIG_COLDFIRE | ||
45 | subl #12,%sp | ||
46 | moveml %a0/%d0/%d1,(%sp) | ||
47 | #else | ||
48 | moveml %a0/%d0/%d1,-(%sp) | ||
49 | #endif | ||
50 | movel %a1,-(%sp) | ||
51 | jbsr __up | ||
52 | movel (%sp)+,%a1 | ||
53 | movel (%sp)+,%d0 | ||
54 | movel (%sp)+,%d1 | ||
55 | rts | ||
56 | |||
57 | ENTRY(__down_failed_trylock) | ||
58 | movel %a0,-(%sp) | ||
59 | movel %d1,-(%sp) | ||
60 | movel %a1,-(%sp) | ||
61 | jbsr __down_trylock | ||
62 | movel (%sp)+,%a1 | ||
63 | movel (%sp)+,%d1 | ||
64 | movel (%sp)+,%a0 | ||
65 | rts | ||
66 | |||
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 9e78e1a4ca17..6fcdb6fda2e2 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile | |||
@@ -5,7 +5,7 @@ | |||
5 | extra-y := head.o init_task.o vmlinux.lds | 5 | extra-y := head.o init_task.o vmlinux.lds |
6 | 6 | ||
7 | obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \ | 7 | obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \ |
8 | ptrace.o reset.o semaphore.o setup.o signal.o syscall.o \ | 8 | ptrace.o reset.o setup.o signal.o syscall.o \ |
9 | time.o topology.o traps.o unaligned.o | 9 | time.o topology.o traps.o unaligned.o |
10 | 10 | ||
11 | obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o | 11 | obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o |
diff --git a/arch/mips/kernel/semaphore.c b/arch/mips/kernel/semaphore.c deleted file mode 100644 index 1265358cdca1..000000000000 --- a/arch/mips/kernel/semaphore.c +++ /dev/null | |||
@@ -1,168 +0,0 @@ | |||
1 | /* | ||
2 | * MIPS-specific semaphore code. | ||
3 | * | ||
4 | * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> | ||
5 | * Copyright (C) 2004 Ralf Baechle <ralf@linux-mips.org> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version | ||
10 | * 2 of the License, or (at your option) any later version. | ||
11 | * | ||
12 | * April 2001 - Reworked by Paul Mackerras <paulus@samba.org> | ||
13 | * to eliminate the SMP races in the old version between the updates | ||
14 | * of `count' and `waking'. Now we use negative `count' values to | ||
15 | * indicate that some process(es) are waiting for the semaphore. | ||
16 | */ | ||
17 | |||
18 | #include <linux/module.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <asm/atomic.h> | ||
22 | #include <asm/cpu-features.h> | ||
23 | #include <asm/errno.h> | ||
24 | #include <asm/semaphore.h> | ||
25 | #include <asm/war.h> | ||
26 | /* | ||
27 | * Atomically update sem->count. | ||
28 | * This does the equivalent of the following: | ||
29 | * | ||
30 | * old_count = sem->count; | ||
31 | * tmp = MAX(old_count, 0) + incr; | ||
32 | * sem->count = tmp; | ||
33 | * return old_count; | ||
34 | * | ||
35 | * On machines without lld/scd we need a spinlock to make the manipulation of | ||
36 | * sem->count and sem->waking atomic. Scalability isn't an issue because | ||
37 | * this lock is used on UP only so it's just an empty variable. | ||
38 | */ | ||
39 | static inline int __sem_update_count(struct semaphore *sem, int incr) | ||
40 | { | ||
41 | int old_count, tmp; | ||
42 | |||
43 | if (cpu_has_llsc && R10000_LLSC_WAR) { | ||
44 | __asm__ __volatile__( | ||
45 | " .set mips3 \n" | ||
46 | "1: ll %0, %2 # __sem_update_count \n" | ||
47 | " sra %1, %0, 31 \n" | ||
48 | " not %1 \n" | ||
49 | " and %1, %0, %1 \n" | ||
50 | " addu %1, %1, %3 \n" | ||
51 | " sc %1, %2 \n" | ||
52 | " beqzl %1, 1b \n" | ||
53 | " .set mips0 \n" | ||
54 | : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count) | ||
55 | : "r" (incr), "m" (sem->count)); | ||
56 | } else if (cpu_has_llsc) { | ||
57 | __asm__ __volatile__( | ||
58 | " .set mips3 \n" | ||
59 | "1: ll %0, %2 # __sem_update_count \n" | ||
60 | " sra %1, %0, 31 \n" | ||
61 | " not %1 \n" | ||
62 | " and %1, %0, %1 \n" | ||
63 | " addu %1, %1, %3 \n" | ||
64 | " sc %1, %2 \n" | ||
65 | " beqz %1, 1b \n" | ||
66 | " .set mips0 \n" | ||
67 | : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count) | ||
68 | : "r" (incr), "m" (sem->count)); | ||
69 | } else { | ||
70 | static DEFINE_SPINLOCK(semaphore_lock); | ||
71 | unsigned long flags; | ||
72 | |||
73 | spin_lock_irqsave(&semaphore_lock, flags); | ||
74 | old_count = atomic_read(&sem->count); | ||
75 | tmp = max_t(int, old_count, 0) + incr; | ||
76 | atomic_set(&sem->count, tmp); | ||
77 | spin_unlock_irqrestore(&semaphore_lock, flags); | ||
78 | } | ||
79 | |||
80 | return old_count; | ||
81 | } | ||
82 | |||
83 | void __up(struct semaphore *sem) | ||
84 | { | ||
85 | /* | ||
86 | * Note that we incremented count in up() before we came here, | ||
87 | * but that was ineffective since the result was <= 0, and | ||
88 | * any negative value of count is equivalent to 0. | ||
89 | * This ends up setting count to 1, unless count is now > 0 | ||
90 | * (i.e. because some other cpu has called up() in the meantime), | ||
91 | * in which case we just increment count. | ||
92 | */ | ||
93 | __sem_update_count(sem, 1); | ||
94 | wake_up(&sem->wait); | ||
95 | } | ||
96 | |||
97 | EXPORT_SYMBOL(__up); | ||
98 | |||
99 | /* | ||
100 | * Note that when we come in to __down or __down_interruptible, | ||
101 | * we have already decremented count, but that decrement was | ||
102 | * ineffective since the result was < 0, and any negative value | ||
103 | * of count is equivalent to 0. | ||
104 | * Thus it is only when we decrement count from some value > 0 | ||
105 | * that we have actually got the semaphore. | ||
106 | */ | ||
107 | void __sched __down(struct semaphore *sem) | ||
108 | { | ||
109 | struct task_struct *tsk = current; | ||
110 | DECLARE_WAITQUEUE(wait, tsk); | ||
111 | |||
112 | __set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
113 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
114 | |||
115 | /* | ||
116 | * Try to get the semaphore. If the count is > 0, then we've | ||
117 | * got the semaphore; we decrement count and exit the loop. | ||
118 | * If the count is 0 or negative, we set it to -1, indicating | ||
119 | * that we are asleep, and then sleep. | ||
120 | */ | ||
121 | while (__sem_update_count(sem, -1) <= 0) { | ||
122 | schedule(); | ||
123 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
124 | } | ||
125 | remove_wait_queue(&sem->wait, &wait); | ||
126 | __set_task_state(tsk, TASK_RUNNING); | ||
127 | |||
128 | /* | ||
129 | * If there are any more sleepers, wake one of them up so | ||
130 | * that it can either get the semaphore, or set count to -1 | ||
131 | * indicating that there are still processes sleeping. | ||
132 | */ | ||
133 | wake_up(&sem->wait); | ||
134 | } | ||
135 | |||
136 | EXPORT_SYMBOL(__down); | ||
137 | |||
138 | int __sched __down_interruptible(struct semaphore * sem) | ||
139 | { | ||
140 | int retval = 0; | ||
141 | struct task_struct *tsk = current; | ||
142 | DECLARE_WAITQUEUE(wait, tsk); | ||
143 | |||
144 | __set_task_state(tsk, TASK_INTERRUPTIBLE); | ||
145 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
146 | |||
147 | while (__sem_update_count(sem, -1) <= 0) { | ||
148 | if (signal_pending(current)) { | ||
149 | /* | ||
150 | * A signal is pending - give up trying. | ||
151 | * Set sem->count to 0 if it is negative, | ||
152 | * since we are no longer sleeping. | ||
153 | */ | ||
154 | __sem_update_count(sem, 0); | ||
155 | retval = -EINTR; | ||
156 | break; | ||
157 | } | ||
158 | schedule(); | ||
159 | set_task_state(tsk, TASK_INTERRUPTIBLE); | ||
160 | } | ||
161 | remove_wait_queue(&sem->wait, &wait); | ||
162 | __set_task_state(tsk, TASK_RUNNING); | ||
163 | |||
164 | wake_up(&sem->wait); | ||
165 | return retval; | ||
166 | } | ||
167 | |||
168 | EXPORT_SYMBOL(__down_interruptible); | ||
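
Every __sem_update_count() in this series, whether built from ll/sc here, lwarx/stwcx. on powerpc below, or cs on s390, computes the same update. A hedged C11 equivalent, useful for checking the arithmetic:

    #include <stdatomic.h>

    /* new = max(old, 0) + incr, returning the old value. */
    static int sem_update_count_sketch(atomic_int *count, int incr)
    {
            int old = atomic_load(count);
            int new;

            do {
                    new = (old > 0 ? old : 0) + incr;
            } while (!atomic_compare_exchange_weak(count, &old, new));
            return old;
    }

Note that incr == 0 clamps a negative count back to zero, which is exactly how the signal-pending paths retract a sleeper without granting a token.
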
diff --git a/arch/mn10300/kernel/Makefile b/arch/mn10300/kernel/Makefile index ef07c956170a..23f2ab67574c 100644 --- a/arch/mn10300/kernel/Makefile +++ b/arch/mn10300/kernel/Makefile | |||
@@ -3,7 +3,7 @@ | |||
3 | # | 3 | # |
4 | extra-y := head.o init_task.o vmlinux.lds | 4 | extra-y := head.o init_task.o vmlinux.lds |
5 | 5 | ||
6 | obj-y := process.o semaphore.o signal.o entry.o fpu.o traps.o irq.o \ | 6 | obj-y := process.o signal.o entry.o fpu.o traps.o irq.o \ |
7 | ptrace.o setup.o time.o sys_mn10300.o io.o kthread.o \ | 7 | ptrace.o setup.o time.o sys_mn10300.o io.o kthread.o \ |
8 | switch_to.o mn10300_ksyms.o kernel_execve.o | 8 | switch_to.o mn10300_ksyms.o kernel_execve.o |
9 | 9 | ||
diff --git a/arch/mn10300/kernel/semaphore.c b/arch/mn10300/kernel/semaphore.c deleted file mode 100644 index 9153c4039fd2..000000000000 --- a/arch/mn10300/kernel/semaphore.c +++ /dev/null | |||
@@ -1,149 +0,0 @@ | |||
1 | /* MN10300 Semaphore implementation | ||
2 | * | ||
3 | * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | #include <linux/sched.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <asm/semaphore.h> | ||
14 | |||
15 | struct sem_waiter { | ||
16 | struct list_head list; | ||
17 | struct task_struct *task; | ||
18 | }; | ||
19 | |||
20 | #if SEMAPHORE_DEBUG | ||
21 | void semtrace(struct semaphore *sem, const char *str) | ||
22 | { | ||
23 | if (sem->debug) | ||
24 | printk(KERN_DEBUG "[%d] %s({%d,%d})\n", | ||
25 | current->pid, | ||
26 | str, | ||
27 | atomic_read(&sem->count), | ||
28 | list_empty(&sem->wait_list) ? 0 : 1); | ||
29 | } | ||
30 | #else | ||
31 | #define semtrace(SEM, STR) do { } while (0) | ||
32 | #endif | ||
33 | |||
34 | /* | ||
35 | * wait for a token to be granted from a semaphore | ||
36 | * - entered with lock held and interrupts disabled | ||
37 | */ | ||
38 | void __down(struct semaphore *sem, unsigned long flags) | ||
39 | { | ||
40 | struct task_struct *tsk = current; | ||
41 | struct sem_waiter waiter; | ||
42 | |||
43 | semtrace(sem, "Entering __down"); | ||
44 | |||
45 | /* set up my own style of waitqueue */ | ||
46 | waiter.task = tsk; | ||
47 | get_task_struct(tsk); | ||
48 | |||
49 | list_add_tail(&waiter.list, &sem->wait_list); | ||
50 | |||
51 | /* we don't need to touch the semaphore struct anymore */ | ||
52 | spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
53 | |||
54 | /* wait to be given the semaphore */ | ||
55 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
56 | |||
57 | for (;;) { | ||
58 | if (!waiter.task) | ||
59 | break; | ||
60 | schedule(); | ||
61 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
62 | } | ||
63 | |||
64 | tsk->state = TASK_RUNNING; | ||
65 | semtrace(sem, "Leaving __down"); | ||
66 | } | ||
67 | EXPORT_SYMBOL(__down); | ||
68 | |||
69 | /* | ||
70 | * interruptibly wait for a token to be granted from a semaphore | ||
71 | * - entered with lock held and interrupts disabled | ||
72 | */ | ||
73 | int __down_interruptible(struct semaphore *sem, unsigned long flags) | ||
74 | { | ||
75 | struct task_struct *tsk = current; | ||
76 | struct sem_waiter waiter; | ||
77 | int ret; | ||
78 | |||
79 | semtrace(sem, "Entering __down_interruptible"); | ||
80 | |||
81 | /* set up my own style of waitqueue */ | ||
82 | waiter.task = tsk; | ||
83 | get_task_struct(tsk); | ||
84 | |||
85 | list_add_tail(&waiter.list, &sem->wait_list); | ||
86 | |||
87 | /* we don't need to touch the semaphore struct anymore */ | ||
88 | set_task_state(tsk, TASK_INTERRUPTIBLE); | ||
89 | |||
90 | spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
91 | |||
92 | /* wait to be given the semaphore */ | ||
93 | ret = 0; | ||
94 | for (;;) { | ||
95 | if (!waiter.task) | ||
96 | break; | ||
97 | if (unlikely(signal_pending(current))) | ||
98 | goto interrupted; | ||
99 | schedule(); | ||
100 | set_task_state(tsk, TASK_INTERRUPTIBLE); | ||
101 | } | ||
102 | |||
103 | out: | ||
104 | tsk->state = TASK_RUNNING; | ||
105 | semtrace(sem, "Leaving __down_interruptible"); | ||
106 | return ret; | ||
107 | |||
108 | interrupted: | ||
109 | spin_lock_irqsave(&sem->wait_lock, flags); | ||
110 | list_del(&waiter.list); | ||
111 | spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
112 | |||
113 | ret = 0; | ||
114 | if (!waiter.task) { | ||
115 | put_task_struct(current); | ||
116 | ret = -EINTR; | ||
117 | } | ||
118 | goto out; | ||
119 | } | ||
120 | EXPORT_SYMBOL(__down_interruptible); | ||
121 | |||
122 | /* | ||
123 | * release a single token back to a semaphore | ||
124 | * - entered with lock held and interrupts disabled | ||
125 | */ | ||
126 | void __up(struct semaphore *sem) | ||
127 | { | ||
128 | struct task_struct *tsk; | ||
129 | struct sem_waiter *waiter; | ||
130 | |||
131 | semtrace(sem, "Entering __up"); | ||
132 | |||
133 | /* grant the token to the process at the front of the queue */ | ||
134 | waiter = list_entry(sem->wait_list.next, struct sem_waiter, list); | ||
135 | |||
136 | /* We must be careful not to touch 'waiter' after we set ->task = NULL. | ||
137 | * It is allocated on the waiter's stack and may become invalid at | ||
138 | * any time after that point (due to a wakeup from another source). | ||
139 | */ | ||
140 | list_del_init(&waiter->list); | ||
141 | tsk = waiter->task; | ||
142 | smp_mb(); | ||
143 | waiter->task = NULL; | ||
144 | wake_up_process(tsk); | ||
145 | put_task_struct(tsk); | ||
146 | |||
147 | semtrace(sem, "Leaving __up"); | ||
148 | } | ||
149 | EXPORT_SYMBOL(__up); | ||
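
The smp_mb() between list_del_init() and the ->task store is the load-bearing part of this handoff: the moment ->task reads NULL, the waiter's on-stack sem_waiter may cease to exist. In C11 terms the pairing is a release store against an acquire load; a hedged sketch, with schedule() modelled as a spin:

    #include <stdatomic.h>

    struct waiter_sketch { atomic_int granted; };

    /* Granter (__up side): publish the grant last, with release order,
     * so all prior updates are visible to the woken waiter. */
    static void grant(struct waiter_sketch *w)
    {
            atomic_store_explicit(&w->granted, 1, memory_order_release);
    }

    /* Waiter (__down side): once the acquire load sees the grant, the
     * frame holding *w may safely go out of scope. */
    static void wait_for_grant(struct waiter_sketch *w)
    {
            while (!atomic_load_explicit(&w->granted, memory_order_acquire))
                    ;  /* stands in for schedule() */
    }
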
diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile index 27827bc3717e..1f6585a56f97 100644 --- a/arch/parisc/kernel/Makefile +++ b/arch/parisc/kernel/Makefile | |||
@@ -9,7 +9,7 @@ AFLAGS_pacache.o := -traditional | |||
9 | 9 | ||
10 | obj-y := cache.o pacache.o setup.o traps.o time.o irq.o \ | 10 | obj-y := cache.o pacache.o setup.o traps.o time.o irq.o \ |
11 | pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \ | 11 | pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \ |
12 | ptrace.o hardware.o inventory.o drivers.o semaphore.o \ | 12 | ptrace.o hardware.o inventory.o drivers.o \ |
13 | signal.o hpmc.o real2.o parisc_ksyms.o unaligned.o \ | 13 | signal.o hpmc.o real2.o parisc_ksyms.o unaligned.o \ |
14 | process.o processor.o pdc_cons.o pdc_chassis.o unwind.o \ | 14 | process.o processor.o pdc_cons.o pdc_chassis.o unwind.o \ |
15 | topology.o | 15 | topology.o |
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c index 7aca704e96f0..5b7fc4aa044d 100644 --- a/arch/parisc/kernel/parisc_ksyms.c +++ b/arch/parisc/kernel/parisc_ksyms.c | |||
@@ -69,11 +69,6 @@ EXPORT_SYMBOL(memcpy_toio); | |||
69 | EXPORT_SYMBOL(memcpy_fromio); | 69 | EXPORT_SYMBOL(memcpy_fromio); |
70 | EXPORT_SYMBOL(memset_io); | 70 | EXPORT_SYMBOL(memset_io); |
71 | 71 | ||
72 | #include <asm/semaphore.h> | ||
73 | EXPORT_SYMBOL(__up); | ||
74 | EXPORT_SYMBOL(__down_interruptible); | ||
75 | EXPORT_SYMBOL(__down); | ||
76 | |||
77 | extern void $$divI(void); | 72 | extern void $$divI(void); |
78 | extern void $$divU(void); | 73 | extern void $$divU(void); |
79 | extern void $$remI(void); | 74 | extern void $$remI(void); |
diff --git a/arch/parisc/kernel/semaphore.c b/arch/parisc/kernel/semaphore.c deleted file mode 100644 index ee806bcc3726..000000000000 --- a/arch/parisc/kernel/semaphore.c +++ /dev/null | |||
@@ -1,102 +0,0 @@ | |||
1 | /* | ||
2 | * Semaphore implementation Copyright (c) 2001 Matthew Wilcox, Hewlett-Packard | ||
3 | */ | ||
4 | |||
5 | #include <linux/sched.h> | ||
6 | #include <linux/spinlock.h> | ||
7 | #include <linux/errno.h> | ||
8 | #include <linux/init.h> | ||
9 | |||
10 | /* | ||
11 | * Semaphores are complex as we wish to avoid using two variables. | ||
12 | * `count' has multiple roles, depending on its value. If it is positive | ||
13 | * or zero, there are no waiters. The functions here will never be | ||
14 | * called; see <asm/semaphore.h> | ||
15 | * | ||
16 | * When count is -1 it indicates there is at least one task waiting | ||
17 | * for the semaphore. | ||
18 | * | ||
19 | * When count is less than that, there are '- count - 1' wakeups | ||
20 | * pending. i.e. if it has value -3, there are 2 wakeups pending. | ||
21 | * | ||
22 | * Note that these functions are only called when there is contention | ||
23 | * on the lock, and as such all this is the "non-critical" part of the | ||
24 | * whole semaphore business. The critical part is the inline stuff in | ||
25 | * <asm/semaphore.h> where we want to avoid any extra jumps and calls. | ||
26 | */ | ||
27 | void __up(struct semaphore *sem) | ||
28 | { | ||
29 | sem->count--; | ||
30 | wake_up(&sem->wait); | ||
31 | } | ||
32 | |||
33 | #define wakers(count) (-1 - count) | ||
34 | |||
35 | #define DOWN_HEAD \ | ||
36 | int ret = 0; \ | ||
37 | DECLARE_WAITQUEUE(wait, current); \ | ||
38 | \ | ||
39 | /* Note that someone is waiting */ \ | ||
40 | if (sem->count == 0) \ | ||
41 | sem->count = -1; \ | ||
42 | \ | ||
43 | /* protected by the sentry still -- use unlocked version */ \ | ||
44 | wait.flags = WQ_FLAG_EXCLUSIVE; \ | ||
45 | __add_wait_queue_tail(&sem->wait, &wait); \ | ||
46 | lost_race: \ | ||
47 | spin_unlock_irq(&sem->sentry); \ | ||
48 | |||
49 | #define DOWN_TAIL \ | ||
50 | spin_lock_irq(&sem->sentry); \ | ||
51 | if (wakers(sem->count) == 0 && ret == 0) \ | ||
52 | goto lost_race; /* Someone stole our wakeup */ \ | ||
53 | __remove_wait_queue(&sem->wait, &wait); \ | ||
54 | current->state = TASK_RUNNING; \ | ||
55 | if (!waitqueue_active(&sem->wait) && (sem->count < 0)) \ | ||
56 | sem->count = wakers(sem->count); | ||
57 | |||
58 | #define UPDATE_COUNT \ | ||
59 | sem->count += (sem->count < 0) ? 1 : - 1; | ||
60 | |||
61 | |||
62 | void __sched __down(struct semaphore * sem) | ||
63 | { | ||
64 | DOWN_HEAD | ||
65 | |||
66 | for(;;) { | ||
67 | set_task_state(current, TASK_UNINTERRUPTIBLE); | ||
68 | /* we can _read_ this without the sentry */ | ||
69 | if (sem->count != -1) | ||
70 | break; | ||
71 | schedule(); | ||
72 | } | ||
73 | |||
74 | DOWN_TAIL | ||
75 | UPDATE_COUNT | ||
76 | } | ||
77 | |||
78 | int __sched __down_interruptible(struct semaphore * sem) | ||
79 | { | ||
80 | DOWN_HEAD | ||
81 | |||
82 | for(;;) { | ||
83 | set_task_state(current, TASK_INTERRUPTIBLE); | ||
84 | /* we can _read_ this without the sentry */ | ||
85 | if (sem->count != -1) | ||
86 | break; | ||
87 | |||
88 | if (signal_pending(current)) { | ||
89 | ret = -EINTR; | ||
90 | break; | ||
91 | } | ||
92 | schedule(); | ||
93 | } | ||
94 | |||
95 | DOWN_TAIL | ||
96 | |||
97 | if (!ret) { | ||
98 | UPDATE_COUNT | ||
99 | } | ||
100 | |||
101 | return ret; | ||
102 | } | ||
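
The single-counter encoding above is easy to misread, so a tiny self-check of the wakers() macro may help (user-space sketch):

    #include <assert.h>

    #define wakers(count) (-1 - (count))

    int main(void)
    {
            assert(wakers(-1) == 0);  /* waiters present, no wakeups banked */
            assert(wakers(-3) == 2);  /* two wakeups pending */
            return 0;
    }
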
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index c1baf9d5903f..b9dbfff9afe9 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile | |||
@@ -12,7 +12,7 @@ CFLAGS_prom_init.o += -fPIC | |||
12 | CFLAGS_btext.o += -fPIC | 12 | CFLAGS_btext.o += -fPIC |
13 | endif | 13 | endif |
14 | 14 | ||
15 | obj-y := semaphore.o cputable.o ptrace.o syscalls.o \ | 15 | obj-y := cputable.o ptrace.o syscalls.o \ |
16 | irq.o align.o signal_32.o pmc.o vdso.o \ | 16 | irq.o align.o signal_32.o pmc.o vdso.o \ |
17 | init_task.o process.o systbl.o idle.o \ | 17 | init_task.o process.o systbl.o idle.o \ |
18 | signal.o | 18 | signal.o |
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index 9c98424277a8..65d14e6ddc3c 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/bitops.h> | 15 | #include <linux/bitops.h> |
16 | 16 | ||
17 | #include <asm/page.h> | 17 | #include <asm/page.h> |
18 | #include <asm/semaphore.h> | ||
19 | #include <asm/processor.h> | 18 | #include <asm/processor.h> |
20 | #include <asm/cacheflush.h> | 19 | #include <asm/cacheflush.h> |
21 | #include <asm/uaccess.h> | 20 | #include <asm/uaccess.h> |
diff --git a/arch/powerpc/kernel/semaphore.c b/arch/powerpc/kernel/semaphore.c deleted file mode 100644 index 2f8c3c951394..000000000000 --- a/arch/powerpc/kernel/semaphore.c +++ /dev/null | |||
@@ -1,135 +0,0 @@ | |||
1 | /* | ||
2 | * PowerPC-specific semaphore code. | ||
3 | * | ||
4 | * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * April 2001 - Reworked by Paul Mackerras <paulus@samba.org> | ||
12 | * to eliminate the SMP races in the old version between the updates | ||
13 | * of `count' and `waking'. Now we use negative `count' values to | ||
14 | * indicate that some process(es) are waiting for the semaphore. | ||
15 | */ | ||
16 | |||
17 | #include <linux/sched.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/module.h> | ||
20 | |||
21 | #include <asm/atomic.h> | ||
22 | #include <asm/semaphore.h> | ||
23 | #include <asm/errno.h> | ||
24 | |||
25 | /* | ||
26 | * Atomically update sem->count. | ||
27 | * This does the equivalent of the following: | ||
28 | * | ||
29 | * old_count = sem->count; | ||
30 | * tmp = MAX(old_count, 0) + incr; | ||
31 | * sem->count = tmp; | ||
32 | * return old_count; | ||
33 | */ | ||
34 | static inline int __sem_update_count(struct semaphore *sem, int incr) | ||
35 | { | ||
36 | int old_count, tmp; | ||
37 | |||
38 | __asm__ __volatile__("\n" | ||
39 | "1: lwarx %0,0,%3\n" | ||
40 | " srawi %1,%0,31\n" | ||
41 | " andc %1,%0,%1\n" | ||
42 | " add %1,%1,%4\n" | ||
43 | PPC405_ERR77(0,%3) | ||
44 | " stwcx. %1,0,%3\n" | ||
45 | " bne 1b" | ||
46 | : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count) | ||
47 | : "r" (&sem->count), "r" (incr), "m" (sem->count) | ||
48 | : "cc"); | ||
49 | |||
50 | return old_count; | ||
51 | } | ||
52 | |||
53 | void __up(struct semaphore *sem) | ||
54 | { | ||
55 | /* | ||
56 | * Note that we incremented count in up() before we came here, | ||
57 | * but that was ineffective since the result was <= 0, and | ||
58 | * any negative value of count is equivalent to 0. | ||
59 | * This ends up setting count to 1, unless count is now > 0 | ||
60 | * (i.e. because some other cpu has called up() in the meantime), | ||
61 | * in which case we just increment count. | ||
62 | */ | ||
63 | __sem_update_count(sem, 1); | ||
64 | wake_up(&sem->wait); | ||
65 | } | ||
66 | EXPORT_SYMBOL(__up); | ||
67 | |||
68 | /* | ||
69 | * Note that when we come in to __down or __down_interruptible, | ||
70 | * we have already decremented count, but that decrement was | ||
71 | * ineffective since the result was < 0, and any negative value | ||
72 | * of count is equivalent to 0. | ||
73 | * Thus it is only when we decrement count from some value > 0 | ||
74 | * that we have actually got the semaphore. | ||
75 | */ | ||
76 | void __sched __down(struct semaphore *sem) | ||
77 | { | ||
78 | struct task_struct *tsk = current; | ||
79 | DECLARE_WAITQUEUE(wait, tsk); | ||
80 | |||
81 | __set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
82 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
83 | |||
84 | /* | ||
85 | * Try to get the semaphore. If the count is > 0, then we've | ||
86 | * got the semaphore; we decrement count and exit the loop. | ||
87 | * If the count is 0 or negative, we set it to -1, indicating | ||
88 | * that we are asleep, and then sleep. | ||
89 | */ | ||
90 | while (__sem_update_count(sem, -1) <= 0) { | ||
91 | schedule(); | ||
92 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
93 | } | ||
94 | remove_wait_queue(&sem->wait, &wait); | ||
95 | __set_task_state(tsk, TASK_RUNNING); | ||
96 | |||
97 | /* | ||
98 | * If there are any more sleepers, wake one of them up so | ||
99 | * that it can either get the semaphore, or set count to -1 | ||
100 | * indicating that there are still processes sleeping. | ||
101 | */ | ||
102 | wake_up(&sem->wait); | ||
103 | } | ||
104 | EXPORT_SYMBOL(__down); | ||
105 | |||
106 | int __sched __down_interruptible(struct semaphore * sem) | ||
107 | { | ||
108 | int retval = 0; | ||
109 | struct task_struct *tsk = current; | ||
110 | DECLARE_WAITQUEUE(wait, tsk); | ||
111 | |||
112 | __set_task_state(tsk, TASK_INTERRUPTIBLE); | ||
113 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
114 | |||
115 | while (__sem_update_count(sem, -1) <= 0) { | ||
116 | if (signal_pending(current)) { | ||
117 | /* | ||
118 | * A signal is pending - give up trying. | ||
119 | * Set sem->count to 0 if it is negative, | ||
120 | * since we are no longer sleeping. | ||
121 | */ | ||
122 | __sem_update_count(sem, 0); | ||
123 | retval = -EINTR; | ||
124 | break; | ||
125 | } | ||
126 | schedule(); | ||
127 | set_task_state(tsk, TASK_INTERRUPTIBLE); | ||
128 | } | ||
129 | remove_wait_queue(&sem->wait, &wait); | ||
130 | __set_task_state(tsk, TASK_RUNNING); | ||
131 | |||
132 | wake_up(&sem->wait); | ||
133 | return retval; | ||
134 | } | ||
135 | EXPORT_SYMBOL(__down_interruptible); | ||
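
The caller-visible API these per-arch files back is untouched by the removal; for reference, a typical usage pattern of the era (kernel-context sketch, not standalone):

    #include <asm/semaphore.h>

    static DECLARE_MUTEX(my_sem);          /* counting semaphore, count = 1 */

    static int do_work(void)
    {
            if (down_interruptible(&my_sem))
                    return -ERESTARTSYS;   /* woken by a signal, not by up() */
            /* ... critical section ... */
            up(&my_sem);
            return 0;
    }
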
diff --git a/arch/ppc/kernel/semaphore.c b/arch/ppc/kernel/semaphore.c deleted file mode 100644 index 2fe429b27c14..000000000000 --- a/arch/ppc/kernel/semaphore.c +++ /dev/null | |||
@@ -1,131 +0,0 @@ | |||
1 | /* | ||
2 | * PowerPC-specific semaphore code. | ||
3 | * | ||
4 | * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * April 2001 - Reworked by Paul Mackerras <paulus@samba.org> | ||
12 | * to eliminate the SMP races in the old version between the updates | ||
13 | * of `count' and `waking'. Now we use negative `count' values to | ||
14 | * indicate that some process(es) are waiting for the semaphore. | ||
15 | */ | ||
16 | |||
17 | #include <linux/sched.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <asm/atomic.h> | ||
20 | #include <asm/semaphore.h> | ||
21 | #include <asm/errno.h> | ||
22 | |||
23 | /* | ||
24 | * Atomically update sem->count. | ||
25 | * This does the equivalent of the following: | ||
26 | * | ||
27 | * old_count = sem->count; | ||
28 | * tmp = MAX(old_count, 0) + incr; | ||
29 | * sem->count = tmp; | ||
30 | * return old_count; | ||
31 | */ | ||
32 | static inline int __sem_update_count(struct semaphore *sem, int incr) | ||
33 | { | ||
34 | int old_count, tmp; | ||
35 | |||
36 | __asm__ __volatile__("\n" | ||
37 | "1: lwarx %0,0,%3\n" | ||
38 | " srawi %1,%0,31\n" | ||
39 | " andc %1,%0,%1\n" | ||
40 | " add %1,%1,%4\n" | ||
41 | PPC405_ERR77(0,%3) | ||
42 | " stwcx. %1,0,%3\n" | ||
43 | " bne 1b" | ||
44 | : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count) | ||
45 | : "r" (&sem->count), "r" (incr), "m" (sem->count) | ||
46 | : "cc"); | ||
47 | |||
48 | return old_count; | ||
49 | } | ||
50 | |||
51 | void __up(struct semaphore *sem) | ||
52 | { | ||
53 | /* | ||
54 | * Note that we incremented count in up() before we came here, | ||
55 | * but that was ineffective since the result was <= 0, and | ||
56 | * any negative value of count is equivalent to 0. | ||
57 | * This ends up setting count to 1, unless count is now > 0 | ||
58 | * (i.e. because some other cpu has called up() in the meantime), | ||
59 | * in which case we just increment count. | ||
60 | */ | ||
61 | __sem_update_count(sem, 1); | ||
62 | wake_up(&sem->wait); | ||
63 | } | ||
64 | |||
65 | /* | ||
66 | * Note that when we come in to __down or __down_interruptible, | ||
67 | * we have already decremented count, but that decrement was | ||
68 | * ineffective since the result was < 0, and any negative value | ||
69 | * of count is equivalent to 0. | ||
70 | * Thus it is only when we decrement count from some value > 0 | ||
71 | * that we have actually got the semaphore. | ||
72 | */ | ||
73 | void __sched __down(struct semaphore *sem) | ||
74 | { | ||
75 | struct task_struct *tsk = current; | ||
76 | DECLARE_WAITQUEUE(wait, tsk); | ||
77 | |||
78 | tsk->state = TASK_UNINTERRUPTIBLE; | ||
79 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
80 | smp_wmb(); | ||
81 | |||
82 | /* | ||
83 | * Try to get the semaphore. If the count is > 0, then we've | ||
84 | * got the semaphore; we decrement count and exit the loop. | ||
85 | * If the count is 0 or negative, we set it to -1, indicating | ||
86 | * that we are asleep, and then sleep. | ||
87 | */ | ||
88 | while (__sem_update_count(sem, -1) <= 0) { | ||
89 | schedule(); | ||
90 | tsk->state = TASK_UNINTERRUPTIBLE; | ||
91 | } | ||
92 | remove_wait_queue(&sem->wait, &wait); | ||
93 | tsk->state = TASK_RUNNING; | ||
94 | |||
95 | /* | ||
96 | * If there are any more sleepers, wake one of them up so | ||
97 | * that it can either get the semaphore, or set count to -1 | ||
98 | * indicating that there are still processes sleeping. | ||
99 | */ | ||
100 | wake_up(&sem->wait); | ||
101 | } | ||
102 | |||
103 | int __sched __down_interruptible(struct semaphore * sem) | ||
104 | { | ||
105 | int retval = 0; | ||
106 | struct task_struct *tsk = current; | ||
107 | DECLARE_WAITQUEUE(wait, tsk); | ||
108 | |||
109 | tsk->state = TASK_INTERRUPTIBLE; | ||
110 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
111 | smp_wmb(); | ||
112 | |||
113 | while (__sem_update_count(sem, -1) <= 0) { | ||
114 | if (signal_pending(current)) { | ||
115 | /* | ||
116 | * A signal is pending - give up trying. | ||
117 | * Set sem->count to 0 if it is negative, | ||
118 | * since we are no longer sleeping. | ||
119 | */ | ||
120 | __sem_update_count(sem, 0); | ||
121 | retval = -EINTR; | ||
122 | break; | ||
123 | } | ||
124 | schedule(); | ||
125 | tsk->state = TASK_INTERRUPTIBLE; | ||
126 | } | ||
127 | tsk->state = TASK_RUNNING; | ||
128 | remove_wait_queue(&sem->wait, &wait); | ||
129 | wake_up(&sem->wait); | ||
130 | return retval; | ||
131 | } | ||
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index ef2b2470c25c..77051cd27925 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile | |||
@@ -11,7 +11,7 @@ CFLAGS_smp.o := -Wno-nonnull | |||
11 | 11 | ||
12 | obj-y := bitmap.o traps.o time.o process.o base.o early.o \ | 12 | obj-y := bitmap.o traps.o time.o process.o base.o early.o \ |
13 | setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ | 13 | setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ |
14 | semaphore.o s390_ext.o debug.o irq.o ipl.o dis.o diag.o | 14 | s390_ext.o debug.o irq.o ipl.o dis.o diag.o |
15 | 15 | ||
16 | obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) | 16 | obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) |
17 | obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) | 17 | obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) |
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c index 7234c737f825..48238a114ce9 100644 --- a/arch/s390/kernel/s390_ksyms.c +++ b/arch/s390/kernel/s390_ksyms.c | |||
@@ -27,13 +27,6 @@ EXPORT_SYMBOL(_zb_findmap); | |||
27 | EXPORT_SYMBOL(_sb_findmap); | 27 | EXPORT_SYMBOL(_sb_findmap); |
28 | 28 | ||
29 | /* | 29 | /* |
30 | * semaphore ops | ||
31 | */ | ||
32 | EXPORT_SYMBOL(__up); | ||
33 | EXPORT_SYMBOL(__down); | ||
34 | EXPORT_SYMBOL(__down_interruptible); | ||
35 | |||
36 | /* | ||
37 | * binfmt_elf loader | 30 | * binfmt_elf loader |
38 | */ | 31 | */ |
39 | extern int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs); | 32 | extern int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs); |
diff --git a/arch/s390/kernel/semaphore.c b/arch/s390/kernel/semaphore.c deleted file mode 100644 index 191303f6c1d8..000000000000 --- a/arch/s390/kernel/semaphore.c +++ /dev/null | |||
@@ -1,108 +0,0 @@ | |||
1 | /* | ||
2 | * linux/arch/s390/kernel/semaphore.c | ||
3 | * | ||
4 | * S390 version | ||
5 | * Copyright (C) 1998-2000 IBM Corporation | ||
6 | * Author(s): Martin Schwidefsky | ||
7 | * | ||
8 | * Derived from "linux/arch/i386/kernel/semaphore.c" | ||
9 | * Copyright (C) 1999, Linus Torvalds | ||
10 | * | ||
11 | */ | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include <linux/init.h> | ||
15 | |||
16 | #include <asm/semaphore.h> | ||
17 | |||
18 | /* | ||
19 | * Atomically update sem->count. Equivalent to: | ||
20 | * old_val = sem->count.counter; | ||
21 | * new_val = ((old_val >= 0) ? old_val : 0) + incr; | ||
22 | * sem->count.counter = new_val; | ||
23 | * return old_val; | ||
24 | */ | ||
25 | static inline int __sem_update_count(struct semaphore *sem, int incr) | ||
26 | { | ||
27 | int old_val, new_val; | ||
28 | |||
29 | asm volatile( | ||
30 | " l %0,0(%3)\n" | ||
31 | "0: ltr %1,%0\n" | ||
32 | " jhe 1f\n" | ||
33 | " lhi %1,0\n" | ||
34 | "1: ar %1,%4\n" | ||
35 | " cs %0,%1,0(%3)\n" | ||
36 | " jl 0b\n" | ||
37 | : "=&d" (old_val), "=&d" (new_val), "=m" (sem->count) | ||
38 | : "a" (&sem->count), "d" (incr), "m" (sem->count) | ||
39 | : "cc"); | ||
40 | return old_val; | ||
41 | } | ||
42 | |||
43 | /* | ||
44 | * The inline function up() incremented count but the result | ||
45 | * was <= 0. This indicates that some process is waiting on | ||
46 | * the semaphore. The semaphore is free and we'll wake the | ||
47 | * first sleeping process, so we set count to 1 unless some | ||
48 | * other cpu has called up in the meantime in which case | ||
49 | * we just increment count by 1. | ||
50 | */ | ||
51 | void __up(struct semaphore *sem) | ||
52 | { | ||
53 | __sem_update_count(sem, 1); | ||
54 | wake_up(&sem->wait); | ||
55 | } | ||
56 | |||
57 | /* | ||
58 | * The inline function down() decremented count and the result | ||
59 | * was < 0. The wait loop will atomically test and update the | ||
60 | * semaphore counter following the rules: | ||
61 | * count > 0: decrement count, wake up queue and exit. | ||
62 | * count <= 0: set count to -1, go to sleep. | ||
63 | */ | ||
64 | void __sched __down(struct semaphore * sem) | ||
65 | { | ||
66 | struct task_struct *tsk = current; | ||
67 | DECLARE_WAITQUEUE(wait, tsk); | ||
68 | |||
69 | __set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
70 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
71 | while (__sem_update_count(sem, -1) <= 0) { | ||
72 | schedule(); | ||
73 | set_task_state(tsk, TASK_UNINTERRUPTIBLE); | ||
74 | } | ||
75 | remove_wait_queue(&sem->wait, &wait); | ||
76 | __set_task_state(tsk, TASK_RUNNING); | ||
77 | wake_up(&sem->wait); | ||
78 | } | ||
79 | |||
80 | /* | ||
81 | * Same as __down() with an additional test for signals. | ||
82 | * If a signal is pending the count is updated as follows: | ||
83 | * count > 0: wake up queue and exit. | ||
84 | * count <= 0: set count to 0, wake up queue and exit. | ||
85 | */ | ||
86 | int __sched __down_interruptible(struct semaphore * sem) | ||
87 | { | ||
88 | int retval = 0; | ||
89 | struct task_struct *tsk = current; | ||
90 | DECLARE_WAITQUEUE(wait, tsk); | ||
91 | |||
92 | __set_task_state(tsk, TASK_INTERRUPTIBLE); | ||
93 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
94 | while (__sem_update_count(sem, -1) <= 0) { | ||
95 | if (signal_pending(current)) { | ||
96 | __sem_update_count(sem, 0); | ||
97 | retval = -EINTR; | ||
98 | break; | ||
99 | } | ||
100 | schedule(); | ||
101 | set_task_state(tsk, TASK_INTERRUPTIBLE); | ||
102 | } | ||
103 | remove_wait_queue(&sem->wait, &wait); | ||
104 | __set_task_state(tsk, TASK_RUNNING); | ||
105 | wake_up(&sem->wait); | ||
106 | return retval; | ||
107 | } | ||
108 | |||
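
The comment block in the file just deleted specifies __sem_update_count() as "max(old, 0) + incr, returning old". For readers who do not speak s390 assembly, a hedged portable sketch of the same compare-and-swap retry loop, with C11 atomics standing in for the CS instruction (illustrative, not kernel code):

    #include <stdatomic.h>

    /* Retry a CAS until max(old, 0) + incr is published; return the old value. */
    static int sem_update_count(atomic_int *count, int incr)
    {
            int old = atomic_load(count);
            int new;

            do {
                    new = (old >= 0 ? old : 0) + incr;
            } while (!atomic_compare_exchange_weak(count, &old, new));
            /* on CAS failure, old has been reloaded with the current value */

            return old;
    }

Waiters call this with incr == -1 and sleep while the returned value is <= 0; up() calls it with +1, so a backlog of failed acquires collapses to a single recorded waiter.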
diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32 index 62bf373266f7..4bbdce36b92b 100644 --- a/arch/sh/kernel/Makefile_32 +++ b/arch/sh/kernel/Makefile_32 | |||
@@ -5,7 +5,7 @@ | |||
5 | extra-y := head_32.o init_task.o vmlinux.lds | 5 | extra-y := head_32.o init_task.o vmlinux.lds |
6 | 6 | ||
7 | obj-y := debugtraps.o io.o io_generic.o irq.o machvec.o process_32.o \ | 7 | obj-y := debugtraps.o io.o io_generic.o irq.o machvec.o process_32.o \ |
8 | ptrace_32.o semaphore.o setup.o signal_32.o sys_sh.o sys_sh32.o \ | 8 | ptrace_32.o setup.o signal_32.o sys_sh.o sys_sh32.o \ |
9 | syscalls_32.o time_32.o topology.o traps.o traps_32.o | 9 | syscalls_32.o time_32.o topology.o traps.o traps_32.o |
10 | 10 | ||
11 | obj-y += cpu/ timers/ | 11 | obj-y += cpu/ timers/ |
diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64 index e01283d49cbf..6edf53b93d94 100644 --- a/arch/sh/kernel/Makefile_64 +++ b/arch/sh/kernel/Makefile_64 | |||
@@ -1,7 +1,7 @@ | |||
1 | extra-y := head_64.o init_task.o vmlinux.lds | 1 | extra-y := head_64.o init_task.o vmlinux.lds |
2 | 2 | ||
3 | obj-y := debugtraps.o io.o io_generic.o irq.o machvec.o process_64.o \ | 3 | obj-y := debugtraps.o io.o io_generic.o irq.o machvec.o process_64.o \ |
4 | ptrace_64.o semaphore.o setup.o signal_64.o sys_sh.o sys_sh64.o \ | 4 | ptrace_64.o setup.o signal_64.o sys_sh.o sys_sh64.o \ |
5 | syscalls_64.o time_64.o topology.o traps.o traps_64.o | 5 | syscalls_64.o time_64.o topology.o traps.o traps_64.o |
6 | 6 | ||
7 | obj-y += cpu/ timers/ | 7 | obj-y += cpu/ timers/ |
diff --git a/arch/sh/kernel/semaphore.c b/arch/sh/kernel/semaphore.c deleted file mode 100644 index 184119eeae56..000000000000 --- a/arch/sh/kernel/semaphore.c +++ /dev/null | |||
@@ -1,139 +0,0 @@ | |||
1 | /* | ||
2 | * Taken straight from the alpha implementation. | ||
3 | * It may not work well here. | ||
4 | */ | ||
5 | /* | ||
6 | * Generic semaphore code. Buyer beware. Do your own | ||
7 | * specific changes in <asm/semaphore-helper.h> | ||
8 | */ | ||
9 | |||
10 | #include <linux/errno.h> | ||
11 | #include <linux/sched.h> | ||
12 | #include <linux/wait.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <asm/semaphore.h> | ||
15 | #include <asm/semaphore-helper.h> | ||
16 | |||
17 | DEFINE_SPINLOCK(semaphore_wake_lock); | ||
18 | |||
19 | /* | ||
20 | * Semaphores are implemented using a two-way counter: | ||
21 | * The "count" variable is decremented for each process | ||
22 | * that tries to sleep, while the "waking" variable is | ||
23 | * incremented when the "up()" code goes to wake up waiting | ||
24 | * processes. | ||
25 | * | ||
26 | * Notably, the inline "up()" and "down()" functions can | ||
27 | * efficiently test if they need to do any extra work (up | ||
28 | * needs to do something only if count was negative before | ||
29 | * the increment operation). | ||
30 | * | ||
31 | * waking_non_zero() (from asm/semaphore.h) must execute | ||
32 | * atomically. | ||
33 | * | ||
34 | * When __up() is called, the count was negative before | ||
35 | * incrementing it, and we need to wake up somebody. | ||
36 | * | ||
37 | * This routine adds one to the count of processes that need to | ||
38 | * wake up and exit. ALL waiting processes actually wake up but | ||
39 | * only the one that gets to the "waking" field first will gate | ||
40 | * through and acquire the semaphore. The others will go back | ||
41 | * to sleep. | ||
42 | * | ||
43 | * Note that these functions are only called when there is | ||
44 | * contention on the lock, and as such all this is the | ||
45 | * "non-critical" part of the whole semaphore business. The | ||
46 | * critical part is the inline stuff in <asm/semaphore.h> | ||
47 | * where we want to avoid any extra jumps and calls. | ||
48 | */ | ||
49 | void __up(struct semaphore *sem) | ||
50 | { | ||
51 | wake_one_more(sem); | ||
52 | wake_up(&sem->wait); | ||
53 | } | ||
54 | |||
55 | /* | ||
56 | * Perform the "down" function. Return zero for semaphore acquired, | ||
57 | * return negative if we were signalled out of the function. | ||
58 | * | ||
59 | * If called from __down, the return is ignored and the wait loop is | ||
60 | * not interruptible. This means that a task waiting on a semaphore | ||
61 | * using "down()" cannot be killed until someone does an "up()" on | ||
62 | * the semaphore. | ||
63 | * | ||
64 | * If called from __down_interruptible, the return value gets checked | ||
65 | * upon return. If the return value is negative then the task continues | ||
66 | * with the negative value in the return register (it can be tested by | ||
67 | * the caller). | ||
68 | * | ||
69 | * Either form may be used in conjunction with "up()". | ||
70 | * | ||
71 | */ | ||
72 | |||
73 | #define DOWN_VAR \ | ||
74 | struct task_struct *tsk = current; \ | ||
75 | wait_queue_t wait; \ | ||
76 | init_waitqueue_entry(&wait, tsk); | ||
77 | |||
78 | #define DOWN_HEAD(task_state) \ | ||
79 | \ | ||
80 | \ | ||
81 | tsk->state = (task_state); \ | ||
82 | add_wait_queue(&sem->wait, &wait); \ | ||
83 | \ | ||
84 | /* \ | ||
85 | * Ok, we're set up. sem->count is known to be less than zero \ | ||
86 | * so we must wait. \ | ||
87 | * \ | ||
88 | * We can let go of the lock for purposes of waiting. \ | ||
89 | * We re-acquire it after awaking so as to protect \ | ||
90 | * all semaphore operations. \ | ||
91 | * \ | ||
92 | * If "up()" is called before we call waking_non_zero() then \ | ||
93 | * we will catch it right away. If it is called later then \ | ||
94 | * we will have to go through a wakeup cycle to catch it. \ | ||
95 | * \ | ||
96 | * Multiple waiters contend for the semaphore lock to see \ | ||
97 | * who gets to gate through and who has to wait some more. \ | ||
98 | */ \ | ||
99 | for (;;) { | ||
100 | |||
101 | #define DOWN_TAIL(task_state) \ | ||
102 | tsk->state = (task_state); \ | ||
103 | } \ | ||
104 | tsk->state = TASK_RUNNING; \ | ||
105 | remove_wait_queue(&sem->wait, &wait); | ||
106 | |||
107 | void __sched __down(struct semaphore * sem) | ||
108 | { | ||
109 | DOWN_VAR | ||
110 | DOWN_HEAD(TASK_UNINTERRUPTIBLE) | ||
111 | if (waking_non_zero(sem)) | ||
112 | break; | ||
113 | schedule(); | ||
114 | DOWN_TAIL(TASK_UNINTERRUPTIBLE) | ||
115 | } | ||
116 | |||
117 | int __sched __down_interruptible(struct semaphore * sem) | ||
118 | { | ||
119 | int ret = 0; | ||
120 | DOWN_VAR | ||
121 | DOWN_HEAD(TASK_INTERRUPTIBLE) | ||
122 | |||
123 | ret = waking_non_zero_interruptible(sem, tsk); | ||
124 | if (ret) | ||
125 | { | ||
126 | if (ret == 1) | ||
127 | /* ret != 0 only if we get interrupted -arca */ | ||
128 | ret = 0; | ||
129 | break; | ||
130 | } | ||
131 | schedule(); | ||
132 | DOWN_TAIL(TASK_INTERRUPTIBLE) | ||
133 | return ret; | ||
134 | } | ||
135 | |||
136 | int __down_trylock(struct semaphore * sem) | ||
137 | { | ||
138 | return waking_non_zero_trylock(sem); | ||
139 | } | ||
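
The sh code above leans on the semaphore-helper scheme: up() banks a wakeup credit in a second counter, every sleeper wakes, and only those that manage to consume a credit actually acquire the semaphore. A hedged sketch of that gate, with a POSIX mutex standing in for semaphore_wake_lock (all names illustrative):

    #include <pthread.h>

    static pthread_mutex_t wake_lock = PTHREAD_MUTEX_INITIALIZER;

    struct hsem { int waking; };

    void wake_one_more(struct hsem *s)
    {
            pthread_mutex_lock(&wake_lock);
            s->waking++;                    /* bank one wakeup credit */
            pthread_mutex_unlock(&wake_lock);
    }

    int waking_non_zero(struct hsem *s)
    {
            int got = 0;

            pthread_mutex_lock(&wake_lock);
            if (s->waking > 0) {            /* consume a credit, if any */
                    s->waking--;
                    got = 1;
            }
            pthread_mutex_unlock(&wake_lock);
            return got;
    }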
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c index 45bb333fd9ec..6d405462cee8 100644 --- a/arch/sh/kernel/sh_ksyms_32.c +++ b/arch/sh/kernel/sh_ksyms_32.c | |||
@@ -9,7 +9,6 @@ | |||
9 | #include <linux/pci.h> | 9 | #include <linux/pci.h> |
10 | #include <linux/irq.h> | 10 | #include <linux/irq.h> |
11 | #include <asm/sections.h> | 11 | #include <asm/sections.h> |
12 | #include <asm/semaphore.h> | ||
13 | #include <asm/processor.h> | 12 | #include <asm/processor.h> |
14 | #include <asm/uaccess.h> | 13 | #include <asm/uaccess.h> |
15 | #include <asm/checksum.h> | 14 | #include <asm/checksum.h> |
@@ -48,12 +47,6 @@ EXPORT_SYMBOL(__copy_user); | |||
48 | EXPORT_SYMBOL(get_vm_area); | 47 | EXPORT_SYMBOL(get_vm_area); |
49 | #endif | 48 | #endif |
50 | 49 | ||
51 | /* semaphore exports */ | ||
52 | EXPORT_SYMBOL(__up); | ||
53 | EXPORT_SYMBOL(__down); | ||
54 | EXPORT_SYMBOL(__down_interruptible); | ||
55 | EXPORT_SYMBOL(__down_trylock); | ||
56 | |||
57 | EXPORT_SYMBOL(__udelay); | 50 | EXPORT_SYMBOL(__udelay); |
58 | EXPORT_SYMBOL(__ndelay); | 51 | EXPORT_SYMBOL(__ndelay); |
59 | EXPORT_SYMBOL(__const_udelay); | 52 | EXPORT_SYMBOL(__const_udelay); |
diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c index b6410ce4bd1d..a310c9707f03 100644 --- a/arch/sh/kernel/sh_ksyms_64.c +++ b/arch/sh/kernel/sh_ksyms_64.c | |||
@@ -16,7 +16,6 @@ | |||
16 | #include <linux/in6.h> | 16 | #include <linux/in6.h> |
17 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
18 | #include <linux/screen_info.h> | 18 | #include <linux/screen_info.h> |
19 | #include <asm/semaphore.h> | ||
20 | #include <asm/processor.h> | 19 | #include <asm/processor.h> |
21 | #include <asm/uaccess.h> | 20 | #include <asm/uaccess.h> |
22 | #include <asm/checksum.h> | 21 | #include <asm/checksum.h> |
@@ -37,9 +36,6 @@ EXPORT_SYMBOL(csum_partial_copy_nocheck); | |||
37 | EXPORT_SYMBOL(screen_info); | 36 | EXPORT_SYMBOL(screen_info); |
38 | #endif | 37 | #endif |
39 | 38 | ||
40 | EXPORT_SYMBOL(__down); | ||
41 | EXPORT_SYMBOL(__down_trylock); | ||
42 | EXPORT_SYMBOL(__up); | ||
43 | EXPORT_SYMBOL(__put_user_asm_l); | 39 | EXPORT_SYMBOL(__put_user_asm_l); |
44 | EXPORT_SYMBOL(__get_user_asm_l); | 40 | EXPORT_SYMBOL(__get_user_asm_l); |
45 | EXPORT_SYMBOL(copy_page); | 41 | EXPORT_SYMBOL(copy_page); |
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index bf1b15d3f6f5..2712bb166f6f 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile | |||
@@ -12,7 +12,7 @@ obj-y := entry.o wof.o wuf.o etrap.o rtrap.o traps.o $(IRQ_OBJS) \ | |||
12 | sys_sparc.o sunos_asm.o systbls.o \ | 12 | sys_sparc.o sunos_asm.o systbls.o \ |
13 | time.o windows.o cpu.o devices.o sclow.o \ | 13 | time.o windows.o cpu.o devices.o sclow.o \ |
14 | tadpole.o tick14.o ptrace.o sys_solaris.o \ | 14 | tadpole.o tick14.o ptrace.o sys_solaris.o \ |
15 | unaligned.o una_asm.o muldiv.o semaphore.o \ | 15 | unaligned.o una_asm.o muldiv.o \ |
16 | prom.o of_device.o devres.o | 16 | prom.o of_device.o devres.o |
17 | 17 | ||
18 | devres-y = ../../../kernel/irq/devres.o | 18 | devres-y = ../../../kernel/irq/devres.o |
diff --git a/arch/sparc/kernel/semaphore.c b/arch/sparc/kernel/semaphore.c deleted file mode 100644 index 0c37c1a7cd7e..000000000000 --- a/arch/sparc/kernel/semaphore.c +++ /dev/null | |||
@@ -1,155 +0,0 @@ | |||
1 | /* $Id: semaphore.c,v 1.7 2001/04/18 21:06:05 davem Exp $ */ | ||
2 | |||
3 | /* sparc32 semaphore implementation, based on i386 version */ | ||
4 | |||
5 | #include <linux/sched.h> | ||
6 | #include <linux/errno.h> | ||
7 | #include <linux/init.h> | ||
8 | |||
9 | #include <asm/semaphore.h> | ||
10 | |||
11 | /* | ||
12 | * Semaphores are implemented using a two-way counter: | ||
13 | * The "count" variable is decremented for each process | ||
14 | * that tries to acquire the semaphore, while the "sleeping" | ||
15 | * variable is a count of such acquires. | ||
16 | * | ||
17 | * Notably, the inline "up()" and "down()" functions can | ||
18 | * efficiently test if they need to do any extra work (up | ||
19 | * needs to do something only if count was negative before | ||
20 | * the increment operation). | ||
21 | * | ||
22 | * "sleeping" and the contention routine ordering is | ||
23 | * protected by the semaphore spinlock. | ||
24 | * | ||
25 | * Note that these functions are only called when there is | ||
26 | * contention on the lock, and as such all this is the | ||
27 | * "non-critical" part of the whole semaphore business. The | ||
28 | * critical part is the inline stuff in <asm/semaphore.h> | ||
29 | * where we want to avoid any extra jumps and calls. | ||
30 | */ | ||
31 | |||
32 | /* | ||
33 | * Logic: | ||
34 | * - only on a boundary condition do we need to care. When we go | ||
35 | * from a negative count to a non-negative, we wake people up. | ||
36 | * - when we go from a non-negative count to a negative, we must | ||
37 | * (a) synchronize with the "sleeper" count and (b) make sure | ||
38 | * that we're on the wakeup list before we synchronize so that | ||
39 | * we cannot lose wakeup events. | ||
40 | */ | ||
41 | |||
42 | void __up(struct semaphore *sem) | ||
43 | { | ||
44 | wake_up(&sem->wait); | ||
45 | } | ||
46 | |||
47 | static DEFINE_SPINLOCK(semaphore_lock); | ||
48 | |||
49 | void __sched __down(struct semaphore * sem) | ||
50 | { | ||
51 | struct task_struct *tsk = current; | ||
52 | DECLARE_WAITQUEUE(wait, tsk); | ||
53 | tsk->state = TASK_UNINTERRUPTIBLE; | ||
54 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
55 | |||
56 | spin_lock_irq(&semaphore_lock); | ||
57 | sem->sleepers++; | ||
58 | for (;;) { | ||
59 | int sleepers = sem->sleepers; | ||
60 | |||
61 | /* | ||
62 | * Add "everybody else" into it. They aren't | ||
63 | * playing, because we own the spinlock. | ||
64 | */ | ||
65 | if (!atomic24_add_negative(sleepers - 1, &sem->count)) { | ||
66 | sem->sleepers = 0; | ||
67 | break; | ||
68 | } | ||
69 | sem->sleepers = 1; /* us - see -1 above */ | ||
70 | spin_unlock_irq(&semaphore_lock); | ||
71 | |||
72 | schedule(); | ||
73 | tsk->state = TASK_UNINTERRUPTIBLE; | ||
74 | spin_lock_irq(&semaphore_lock); | ||
75 | } | ||
76 | spin_unlock_irq(&semaphore_lock); | ||
77 | remove_wait_queue(&sem->wait, &wait); | ||
78 | tsk->state = TASK_RUNNING; | ||
79 | wake_up(&sem->wait); | ||
80 | } | ||
81 | |||
82 | int __sched __down_interruptible(struct semaphore * sem) | ||
83 | { | ||
84 | int retval = 0; | ||
85 | struct task_struct *tsk = current; | ||
86 | DECLARE_WAITQUEUE(wait, tsk); | ||
87 | tsk->state = TASK_INTERRUPTIBLE; | ||
88 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
89 | |||
90 | spin_lock_irq(&semaphore_lock); | ||
91 | sem->sleepers ++; | ||
92 | for (;;) { | ||
93 | int sleepers = sem->sleepers; | ||
94 | |||
95 | /* | ||
96 | * With signals pending, this turns into | ||
97 | * the trylock failure case - we won't be | ||
98 | * sleeping, and we can't get the lock as | ||
99 | * it has contention. Just correct the count | ||
100 | * and exit. | ||
101 | */ | ||
102 | if (signal_pending(current)) { | ||
103 | retval = -EINTR; | ||
104 | sem->sleepers = 0; | ||
105 | atomic24_add(sleepers, &sem->count); | ||
106 | break; | ||
107 | } | ||
108 | |||
109 | /* | ||
110 | * Add "everybody else" into it. They aren't | ||
111 | * playing, because we own the spinlock. The | ||
112 | * "-1" is because we're still hoping to get | ||
113 | * the lock. | ||
114 | */ | ||
115 | if (!atomic24_add_negative(sleepers - 1, &sem->count)) { | ||
116 | sem->sleepers = 0; | ||
117 | break; | ||
118 | } | ||
119 | sem->sleepers = 1; /* us - see -1 above */ | ||
120 | spin_unlock_irq(&semaphore_lock); | ||
121 | |||
122 | schedule(); | ||
123 | tsk->state = TASK_INTERRUPTIBLE; | ||
124 | spin_lock_irq(&semaphore_lock); | ||
125 | } | ||
126 | spin_unlock_irq(&semaphore_lock); | ||
127 | tsk->state = TASK_RUNNING; | ||
128 | remove_wait_queue(&sem->wait, &wait); | ||
129 | wake_up(&sem->wait); | ||
130 | return retval; | ||
131 | } | ||
132 | |||
133 | /* | ||
134 | * Trylock failed - make sure we correct for | ||
135 | * having decremented the count. | ||
136 | */ | ||
137 | int __down_trylock(struct semaphore * sem) | ||
138 | { | ||
139 | int sleepers; | ||
140 | unsigned long flags; | ||
141 | |||
142 | spin_lock_irqsave(&semaphore_lock, flags); | ||
143 | sleepers = sem->sleepers + 1; | ||
144 | sem->sleepers = 0; | ||
145 | |||
146 | /* | ||
147 | * Add "everybody else" and us into it. They aren't | ||
148 | * playing, because we own the spinlock. | ||
149 | */ | ||
150 | if (!atomic24_add_negative(sleepers, &sem->count)) | ||
151 | wake_up(&sem->wait); | ||
152 | |||
153 | spin_unlock_irqrestore(&semaphore_lock, flags); | ||
154 | return 1; | ||
155 | } | ||
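
The sparc sleep loops above all hinge on one counting trick: each sleeper decremented count once on entry, and under the spinlock a waiter folds everyone else's decrements back in, leaving count as if only a single waiter were recorded. Isolated as a hedged C sketch (atomic24_* replaced by C11 atomics; helper names invented for illustration):

    #include <stdatomic.h>

    /* nonzero when the result of the add is negative, like atomic24_add_negative() */
    static int add_negative(atomic_int *v, int i)
    {
            return atomic_fetch_add(v, i) + i < 0;
    }

    /* called with the semaphore spinlock held; 1 = acquired */
    static int try_to_acquire(atomic_int *count, int *sleepers)
    {
            if (!add_negative(count, *sleepers - 1)) {
                    *sleepers = 0;          /* count ended non-negative: it's ours */
                    return 1;
            }
            *sleepers = 1;                  /* just us again - see the -1 above */
            return 0;
    }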
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c index c1025e551650..97b1de0e9094 100644 --- a/arch/sparc/kernel/sparc_ksyms.c +++ b/arch/sparc/kernel/sparc_ksyms.c | |||
@@ -107,11 +107,6 @@ EXPORT_SYMBOL(___rw_read_try); | |||
107 | EXPORT_SYMBOL(___rw_read_exit); | 107 | EXPORT_SYMBOL(___rw_read_exit); |
108 | EXPORT_SYMBOL(___rw_write_enter); | 108 | EXPORT_SYMBOL(___rw_write_enter); |
109 | #endif | 109 | #endif |
110 | /* semaphores */ | ||
111 | EXPORT_SYMBOL(__up); | ||
112 | EXPORT_SYMBOL(__down); | ||
113 | EXPORT_SYMBOL(__down_trylock); | ||
114 | EXPORT_SYMBOL(__down_interruptible); | ||
115 | 110 | ||
116 | EXPORT_SYMBOL(sparc_valid_addr_bitmap); | 111 | EXPORT_SYMBOL(sparc_valid_addr_bitmap); |
117 | EXPORT_SYMBOL(phys_base); | 112 | EXPORT_SYMBOL(phys_base); |
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile index 1bf5b187de49..459462e80a12 100644 --- a/arch/sparc64/kernel/Makefile +++ b/arch/sparc64/kernel/Makefile | |||
@@ -10,7 +10,7 @@ extra-y := head.o init_task.o vmlinux.lds | |||
10 | obj-y := process.o setup.o cpu.o idprom.o \ | 10 | obj-y := process.o setup.o cpu.o idprom.o \ |
11 | traps.o auxio.o una_asm.o sysfs.o iommu.o \ | 11 | traps.o auxio.o una_asm.o sysfs.o iommu.o \ |
12 | irq.o ptrace.o time.o sys_sparc.o signal.o \ | 12 | irq.o ptrace.o time.o sys_sparc.o signal.o \ |
13 | unaligned.o central.o pci.o starfire.o semaphore.o \ | 13 | unaligned.o central.o pci.o starfire.o \ |
14 | power.o sbus.o sparc64_ksyms.o chmc.o \ | 14 | power.o sbus.o sparc64_ksyms.o chmc.o \ |
15 | visemul.o prom.o of_device.o hvapi.o sstate.o mdesc.o | 15 | visemul.o prom.o of_device.o hvapi.o sstate.o mdesc.o |
16 | 16 | ||
diff --git a/arch/sparc64/kernel/semaphore.c b/arch/sparc64/kernel/semaphore.c deleted file mode 100644 index 9974a6899551..000000000000 --- a/arch/sparc64/kernel/semaphore.c +++ /dev/null | |||
@@ -1,254 +0,0 @@ | |||
1 | /* semaphore.c: Sparc64 semaphore implementation. | ||
2 | * | ||
3 | * This is basically the PPC semaphore scheme ported to use | ||
4 | * the sparc64 atomic instructions, so see the PPC code for | ||
5 | * credits. | ||
6 | */ | ||
7 | |||
8 | #include <linux/sched.h> | ||
9 | #include <linux/errno.h> | ||
10 | #include <linux/init.h> | ||
11 | |||
12 | /* | ||
13 | * Atomically update sem->count. | ||
14 | * This does the equivalent of the following: | ||
15 | * | ||
16 | * old_count = sem->count; | ||
17 | * tmp = MAX(old_count, 0) + incr; | ||
18 | * sem->count = tmp; | ||
19 | * return old_count; | ||
20 | */ | ||
21 | static inline int __sem_update_count(struct semaphore *sem, int incr) | ||
22 | { | ||
23 | int old_count, tmp; | ||
24 | |||
25 | __asm__ __volatile__("\n" | ||
26 | " ! __sem_update_count old_count(%0) tmp(%1) incr(%4) &sem->count(%3)\n" | ||
27 | "1: ldsw [%3], %0\n" | ||
28 | " mov %0, %1\n" | ||
29 | " cmp %0, 0\n" | ||
30 | " movl %%icc, 0, %1\n" | ||
31 | " add %1, %4, %1\n" | ||
32 | " cas [%3], %0, %1\n" | ||
33 | " cmp %0, %1\n" | ||
34 | " membar #StoreLoad | #StoreStore\n" | ||
35 | " bne,pn %%icc, 1b\n" | ||
36 | " nop\n" | ||
37 | : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count) | ||
38 | : "r" (&sem->count), "r" (incr), "m" (sem->count) | ||
39 | : "cc"); | ||
40 | |||
41 | return old_count; | ||
42 | } | ||
43 | |||
44 | static void __up(struct semaphore *sem) | ||
45 | { | ||
46 | __sem_update_count(sem, 1); | ||
47 | wake_up(&sem->wait); | ||
48 | } | ||
49 | |||
50 | void up(struct semaphore *sem) | ||
51 | { | ||
52 | /* This atomically does: | ||
53 | * old_val = sem->count; | ||
54 | * new_val = sem->count + 1; | ||
55 | * sem->count = new_val; | ||
56 | * if (old_val < 0) | ||
57 | * __up(sem); | ||
58 | * | ||
59 | * The (old_val < 0) test is equivalent to | ||
60 | * the more straightforward (new_val <= 0), | ||
61 | * but it is easier to test the former because | ||
62 | * of how the CAS instruction works. | ||
63 | */ | ||
64 | |||
65 | __asm__ __volatile__("\n" | ||
66 | " ! up sem(%0)\n" | ||
67 | " membar #StoreLoad | #LoadLoad\n" | ||
68 | "1: lduw [%0], %%g1\n" | ||
69 | " add %%g1, 1, %%g7\n" | ||
70 | " cas [%0], %%g1, %%g7\n" | ||
71 | " cmp %%g1, %%g7\n" | ||
72 | " bne,pn %%icc, 1b\n" | ||
73 | " addcc %%g7, 1, %%g0\n" | ||
74 | " membar #StoreLoad | #StoreStore\n" | ||
75 | " ble,pn %%icc, 3f\n" | ||
76 | " nop\n" | ||
77 | "2:\n" | ||
78 | " .subsection 2\n" | ||
79 | "3: mov %0, %%g1\n" | ||
80 | " save %%sp, -160, %%sp\n" | ||
81 | " call %1\n" | ||
82 | " mov %%g1, %%o0\n" | ||
83 | " ba,pt %%xcc, 2b\n" | ||
84 | " restore\n" | ||
85 | " .previous\n" | ||
86 | : : "r" (sem), "i" (__up) | ||
87 | : "g1", "g2", "g3", "g7", "memory", "cc"); | ||
88 | } | ||
89 | |||
90 | static void __sched __down(struct semaphore * sem) | ||
91 | { | ||
92 | struct task_struct *tsk = current; | ||
93 | DECLARE_WAITQUEUE(wait, tsk); | ||
94 | |||
95 | tsk->state = TASK_UNINTERRUPTIBLE; | ||
96 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
97 | |||
98 | while (__sem_update_count(sem, -1) <= 0) { | ||
99 | schedule(); | ||
100 | tsk->state = TASK_UNINTERRUPTIBLE; | ||
101 | } | ||
102 | remove_wait_queue(&sem->wait, &wait); | ||
103 | tsk->state = TASK_RUNNING; | ||
104 | |||
105 | wake_up(&sem->wait); | ||
106 | } | ||
107 | |||
108 | void __sched down(struct semaphore *sem) | ||
109 | { | ||
110 | might_sleep(); | ||
111 | /* This atomically does: | ||
112 | * old_val = sem->count; | ||
113 | * new_val = sem->count - 1; | ||
114 | * sem->count = new_val; | ||
115 | * if (old_val < 1) | ||
116 | * __down(sem); | ||
117 | * | ||
118 | * The (old_val < 1) test is equivalent to | ||
119 | * the more straightforward (new_val < 0), | ||
120 | * but it is easier to test the former because | ||
121 | * of how the CAS instruction works. | ||
122 | */ | ||
123 | |||
124 | __asm__ __volatile__("\n" | ||
125 | " ! down sem(%0)\n" | ||
126 | "1: lduw [%0], %%g1\n" | ||
127 | " sub %%g1, 1, %%g7\n" | ||
128 | " cas [%0], %%g1, %%g7\n" | ||
129 | " cmp %%g1, %%g7\n" | ||
130 | " bne,pn %%icc, 1b\n" | ||
131 | " cmp %%g7, 1\n" | ||
132 | " membar #StoreLoad | #StoreStore\n" | ||
133 | " bl,pn %%icc, 3f\n" | ||
134 | " nop\n" | ||
135 | "2:\n" | ||
136 | " .subsection 2\n" | ||
137 | "3: mov %0, %%g1\n" | ||
138 | " save %%sp, -160, %%sp\n" | ||
139 | " call %1\n" | ||
140 | " mov %%g1, %%o0\n" | ||
141 | " ba,pt %%xcc, 2b\n" | ||
142 | " restore\n" | ||
143 | " .previous\n" | ||
144 | : : "r" (sem), "i" (__down) | ||
145 | : "g1", "g2", "g3", "g7", "memory", "cc"); | ||
146 | } | ||
147 | |||
148 | int down_trylock(struct semaphore *sem) | ||
149 | { | ||
150 | int ret; | ||
151 | |||
152 | /* This atomically does: | ||
153 | * old_val = sem->count; | ||
154 | * new_val = sem->count - 1; | ||
155 | * if (old_val < 1) { | ||
156 | * ret = 1; | ||
157 | * } else { | ||
158 | * sem->count = new_val; | ||
159 | * ret = 0; | ||
160 | * } | ||
161 | * | ||
162 | * The (old_val < 1) test is equivalent to | ||
163 | * the more straightforward (new_val < 0), | ||
164 | * but it is easier to test the former because | ||
165 | * of how the CAS instruction works. | ||
166 | */ | ||
167 | |||
168 | __asm__ __volatile__("\n" | ||
169 | " ! down_trylock sem(%1) ret(%0)\n" | ||
170 | "1: lduw [%1], %%g1\n" | ||
171 | " sub %%g1, 1, %%g7\n" | ||
172 | " cmp %%g1, 1\n" | ||
173 | " bl,pn %%icc, 2f\n" | ||
174 | " mov 1, %0\n" | ||
175 | " cas [%1], %%g1, %%g7\n" | ||
176 | " cmp %%g1, %%g7\n" | ||
177 | " bne,pn %%icc, 1b\n" | ||
178 | " mov 0, %0\n" | ||
179 | " membar #StoreLoad | #StoreStore\n" | ||
180 | "2:\n" | ||
181 | : "=&r" (ret) | ||
182 | : "r" (sem) | ||
183 | : "g1", "g7", "memory", "cc"); | ||
184 | |||
185 | return ret; | ||
186 | } | ||
187 | |||
188 | static int __sched __down_interruptible(struct semaphore * sem) | ||
189 | { | ||
190 | int retval = 0; | ||
191 | struct task_struct *tsk = current; | ||
192 | DECLARE_WAITQUEUE(wait, tsk); | ||
193 | |||
194 | tsk->state = TASK_INTERRUPTIBLE; | ||
195 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
196 | |||
197 | while (__sem_update_count(sem, -1) <= 0) { | ||
198 | if (signal_pending(current)) { | ||
199 | __sem_update_count(sem, 0); | ||
200 | retval = -EINTR; | ||
201 | break; | ||
202 | } | ||
203 | schedule(); | ||
204 | tsk->state = TASK_INTERRUPTIBLE; | ||
205 | } | ||
206 | tsk->state = TASK_RUNNING; | ||
207 | remove_wait_queue(&sem->wait, &wait); | ||
208 | wake_up(&sem->wait); | ||
209 | return retval; | ||
210 | } | ||
211 | |||
212 | int __sched down_interruptible(struct semaphore *sem) | ||
213 | { | ||
214 | int ret = 0; | ||
215 | |||
216 | might_sleep(); | ||
217 | /* This atomically does: | ||
218 | * old_val = sem->count; | ||
219 | * new_val = sem->count - 1; | ||
220 | * sem->count = new_val; | ||
221 | * if (old_val < 1) | ||
222 | * ret = __down_interruptible(sem); | ||
223 | * | ||
224 | * The (old_val < 1) test is equivalent to | ||
225 | * the more straightforward (new_val < 0), | ||
226 | * but it is easier to test the former because | ||
227 | * of how the CAS instruction works. | ||
228 | */ | ||
229 | |||
230 | __asm__ __volatile__("\n" | ||
231 | " ! down_interruptible sem(%2) ret(%0)\n" | ||
232 | "1: lduw [%2], %%g1\n" | ||
233 | " sub %%g1, 1, %%g7\n" | ||
234 | " cas [%2], %%g1, %%g7\n" | ||
235 | " cmp %%g1, %%g7\n" | ||
236 | " bne,pn %%icc, 1b\n" | ||
237 | " cmp %%g7, 1\n" | ||
238 | " membar #StoreLoad | #StoreStore\n" | ||
239 | " bl,pn %%icc, 3f\n" | ||
240 | " nop\n" | ||
241 | "2:\n" | ||
242 | " .subsection 2\n" | ||
243 | "3: mov %2, %%g1\n" | ||
244 | " save %%sp, -160, %%sp\n" | ||
245 | " call %3\n" | ||
246 | " mov %%g1, %%o0\n" | ||
247 | " ba,pt %%xcc, 2b\n" | ||
248 | " restore\n" | ||
249 | " .previous\n" | ||
250 | : "=r" (ret) | ||
251 | : "0" (ret), "r" (sem), "i" (__down_interruptible) | ||
252 | : "g1", "g2", "g3", "g7", "memory", "cc"); | ||
253 | return ret; | ||
254 | } | ||
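
The sparc64 fast paths deleted above are easiest to read as the C their comments describe: a single atomic update, then a branch to the out-of-line helper only when the old count indicates contention. A hedged rendering (slow_up and slow_down are illustrative stand-ins for the file's __up and __down):

    #include <stdatomic.h>

    extern void slow_up(atomic_int *count);
    extern void slow_down(atomic_int *count);

    void sem_up(atomic_int *count)
    {
            if (atomic_fetch_add(count, 1) < 0)     /* old < 0: someone sleeps */
                    slow_up(count);
    }

    void sem_down(atomic_int *count)
    {
            if (atomic_fetch_sub(count, 1) < 1)     /* old < 1: we must sleep */
                    slow_down(count);
    }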
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c index 51fa773f38c9..051b8d9cb989 100644 --- a/arch/sparc64/kernel/sparc64_ksyms.c +++ b/arch/sparc64/kernel/sparc64_ksyms.c | |||
@@ -130,12 +130,6 @@ EXPORT_SYMBOL(_mcount); | |||
130 | 130 | ||
131 | EXPORT_SYMBOL(sparc64_get_clock_tick); | 131 | EXPORT_SYMBOL(sparc64_get_clock_tick); |
132 | 132 | ||
133 | /* semaphores */ | ||
134 | EXPORT_SYMBOL(down); | ||
135 | EXPORT_SYMBOL(down_trylock); | ||
136 | EXPORT_SYMBOL(down_interruptible); | ||
137 | EXPORT_SYMBOL(up); | ||
138 | |||
139 | /* RW semaphores */ | 133 | /* RW semaphores */ |
140 | EXPORT_SYMBOL(__down_read); | 134 | EXPORT_SYMBOL(__down_read); |
141 | EXPORT_SYMBOL(__down_read_trylock); | 135 | EXPORT_SYMBOL(__down_read_trylock); |
diff --git a/arch/um/Kconfig.i386 b/arch/um/Kconfig.i386 index 3cd8a04d66d8..e09edfa560da 100644 --- a/arch/um/Kconfig.i386 +++ b/arch/um/Kconfig.i386 | |||
@@ -19,10 +19,6 @@ config 64BIT | |||
19 | bool | 19 | bool |
20 | default n | 20 | default n |
21 | 21 | ||
22 | config SEMAPHORE_SLEEPERS | ||
23 | bool | ||
24 | default y | ||
25 | |||
26 | config 3_LEVEL_PGTABLES | 22 | config 3_LEVEL_PGTABLES |
27 | bool "Three-level pagetables (EXPERIMENTAL)" | 23 | bool "Three-level pagetables (EXPERIMENTAL)" |
28 | default n | 24 | default n |
diff --git a/arch/um/Kconfig.x86_64 b/arch/um/Kconfig.x86_64 index 6533b349f061..3fbe69e359ed 100644 --- a/arch/um/Kconfig.x86_64 +++ b/arch/um/Kconfig.x86_64 | |||
@@ -11,10 +11,6 @@ config RWSEM_GENERIC_SPINLOCK | |||
11 | bool | 11 | bool |
12 | default y | 12 | default y |
13 | 13 | ||
14 | config SEMAPHORE_SLEEPERS | ||
15 | bool | ||
16 | default y | ||
17 | |||
18 | config 3_LEVEL_PGTABLES | 14 | config 3_LEVEL_PGTABLES |
19 | bool | 15 | bool |
20 | default y | 16 | default y |
diff --git a/arch/um/sys-i386/ksyms.c b/arch/um/sys-i386/ksyms.c index 2a1eac1859ce..bfbefd30db8f 100644 --- a/arch/um/sys-i386/ksyms.c +++ b/arch/um/sys-i386/ksyms.c | |||
@@ -1,17 +1,5 @@ | |||
1 | #include "linux/module.h" | 1 | #include "linux/module.h" |
2 | #include "linux/in6.h" | ||
3 | #include "linux/rwsem.h" | ||
4 | #include "asm/byteorder.h" | ||
5 | #include "asm/delay.h" | ||
6 | #include "asm/semaphore.h" | ||
7 | #include "asm/uaccess.h" | ||
8 | #include "asm/checksum.h" | 2 | #include "asm/checksum.h" |
9 | #include "asm/errno.h" | ||
10 | |||
11 | EXPORT_SYMBOL(__down_failed); | ||
12 | EXPORT_SYMBOL(__down_failed_interruptible); | ||
13 | EXPORT_SYMBOL(__down_failed_trylock); | ||
14 | EXPORT_SYMBOL(__up_wakeup); | ||
15 | 3 | ||
16 | /* Networking helper routines. */ | 4 | /* Networking helper routines. */ |
17 | EXPORT_SYMBOL(csum_partial); | 5 | EXPORT_SYMBOL(csum_partial); |
diff --git a/arch/um/sys-ppc/Makefile b/arch/um/sys-ppc/Makefile index 08901526e893..b8bc844fd2c4 100644 --- a/arch/um/sys-ppc/Makefile +++ b/arch/um/sys-ppc/Makefile | |||
@@ -3,7 +3,7 @@ OBJ = built-in.o | |||
3 | .S.o: | 3 | .S.o: |
4 | $(CC) $(KBUILD_AFLAGS) -D__ASSEMBLY__ -D__UM_PPC__ -c $< -o $*.o | 4 | $(CC) $(KBUILD_AFLAGS) -D__ASSEMBLY__ -D__UM_PPC__ -c $< -o $*.o |
5 | 5 | ||
6 | OBJS = ptrace.o sigcontext.o semaphore.o checksum.o miscthings.o misc.o \ | 6 | OBJS = ptrace.o sigcontext.o checksum.o miscthings.o misc.o \ |
7 | ptrace_user.o sysrq.o | 7 | ptrace_user.o sysrq.o |
8 | 8 | ||
9 | EXTRA_AFLAGS := -DCONFIG_PPC32 -I. -I$(srctree)/arch/ppc/kernel | 9 | EXTRA_AFLAGS := -DCONFIG_PPC32 -I. -I$(srctree)/arch/ppc/kernel |
@@ -20,10 +20,6 @@ ptrace_user.o: ptrace_user.c | |||
20 | sigcontext.o: sigcontext.c | 20 | sigcontext.o: sigcontext.c |
21 | $(CC) $(USER_CFLAGS) $(EXTRA_CFLAGS) -c -o $@ $< | 21 | $(CC) $(USER_CFLAGS) $(EXTRA_CFLAGS) -c -o $@ $< |
22 | 22 | ||
23 | semaphore.c: | ||
24 | rm -f $@ | ||
25 | ln -s $(srctree)/arch/ppc/kernel/$@ $@ | ||
26 | |||
27 | checksum.S: | 23 | checksum.S: |
28 | rm -f $@ | 24 | rm -f $@ |
29 | ln -s $(srctree)/arch/ppc/lib/$@ $@ | 25 | ln -s $(srctree)/arch/ppc/lib/$@ $@ |
@@ -66,4 +62,4 @@ misc.o: misc.S ppc_defs.h | |||
66 | $(CC) $(EXTRA_AFLAGS) $(KBUILD_AFLAGS) -D__ASSEMBLY__ -D__UM_PPC__ -c $< -o $*.o | 62 | $(CC) $(EXTRA_AFLAGS) $(KBUILD_AFLAGS) -D__ASSEMBLY__ -D__UM_PPC__ -c $< -o $*.o |
67 | rm -f asm | 63 | rm -f asm |
68 | 64 | ||
69 | clean-files := $(OBJS) ppc_defs.h checksum.S semaphore.c mk_defs.c | 65 | clean-files := $(OBJS) ppc_defs.h checksum.S mk_defs.c |
diff --git a/arch/um/sys-x86_64/ksyms.c b/arch/um/sys-x86_64/ksyms.c index 12c593607c59..4d7d1a812d8f 100644 --- a/arch/um/sys-x86_64/ksyms.c +++ b/arch/um/sys-x86_64/ksyms.c | |||
@@ -1,16 +1,5 @@ | |||
1 | #include "linux/module.h" | 1 | #include "linux/module.h" |
2 | #include "linux/in6.h" | 2 | #include "asm/string.h" |
3 | #include "linux/rwsem.h" | ||
4 | #include "asm/byteorder.h" | ||
5 | #include "asm/semaphore.h" | ||
6 | #include "asm/uaccess.h" | ||
7 | #include "asm/checksum.h" | ||
8 | #include "asm/errno.h" | ||
9 | |||
10 | EXPORT_SYMBOL(__down_failed); | ||
11 | EXPORT_SYMBOL(__down_failed_interruptible); | ||
12 | EXPORT_SYMBOL(__down_failed_trylock); | ||
13 | EXPORT_SYMBOL(__up_wakeup); | ||
14 | 3 | ||
15 | /*XXX: we need them because they would be exported by x86_64 */ | 4 | /*XXX: we need them because they would be exported by x86_64 */ |
16 | EXPORT_SYMBOL(__memcpy); | 5 | EXPORT_SYMBOL(__memcpy); |
diff --git a/arch/v850/kernel/Makefile b/arch/v850/kernel/Makefile index 3930482bddc4..da5889c53576 100644 --- a/arch/v850/kernel/Makefile +++ b/arch/v850/kernel/Makefile | |||
@@ -11,7 +11,7 @@ | |||
11 | 11 | ||
12 | extra-y := head.o init_task.o vmlinux.lds | 12 | extra-y := head.o init_task.o vmlinux.lds |
13 | 13 | ||
14 | obj-y += intv.o entry.o process.o syscalls.o time.o semaphore.o setup.o \ | 14 | obj-y += intv.o entry.o process.o syscalls.o time.o setup.o \ |
15 | signal.o irq.o mach.o ptrace.o bug.o | 15 | signal.o irq.o mach.o ptrace.o bug.o |
16 | obj-$(CONFIG_MODULES) += module.o v850_ksyms.o | 16 | obj-$(CONFIG_MODULES) += module.o v850_ksyms.o |
17 | # chip-specific code | 17 | # chip-specific code |
diff --git a/arch/v850/kernel/semaphore.c b/arch/v850/kernel/semaphore.c deleted file mode 100644 index fc89fd661c99..000000000000 --- a/arch/v850/kernel/semaphore.c +++ /dev/null | |||
@@ -1,166 +0,0 @@ | |||
1 | /* | ||
2 | * arch/v850/kernel/semaphore.c -- Semaphore support | ||
3 | * | ||
4 | * Copyright (C) 1998-2000 IBM Corporation | ||
5 | * Copyright (C) 1999 Linus Torvalds | ||
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General | ||
8 | * Public License. See the file COPYING in the main directory of this | ||
9 | * archive for more details. | ||
10 | * | ||
11 | * This file is a copy of the s390 version, arch/s390/kernel/semaphore.c | ||
12 | * Author(s): Martin Schwidefsky | ||
13 | * which was derived from the i386 version, linux/arch/i386/kernel/semaphore.c | ||
14 | */ | ||
15 | |||
16 | #include <linux/errno.h> | ||
17 | #include <linux/sched.h> | ||
18 | #include <linux/init.h> | ||
19 | |||
20 | #include <asm/semaphore.h> | ||
21 | |||
22 | /* | ||
23 | * Semaphores are implemented using a two-way counter: | ||
24 | * The "count" variable is decremented for each process | ||
25 | * that tries to acquire the semaphore, while the "sleeping" | ||
26 | * variable is a count of such acquires. | ||
27 | * | ||
28 | * Notably, the inline "up()" and "down()" functions can | ||
29 | * efficiently test if they need to do any extra work (up | ||
30 | * needs to do something only if count was negative before | ||
31 | * the increment operation). | ||
32 | * | ||
33 | * "sleeping" and the contention routine ordering is | ||
34 | * protected by the semaphore spinlock. | ||
35 | * | ||
36 | * Note that these functions are only called when there is | ||
37 | * contention on the lock, and as such all this is the | ||
38 | * "non-critical" part of the whole semaphore business. The | ||
39 | * critical part is the inline stuff in <asm/semaphore.h> | ||
40 | * where we want to avoid any extra jumps and calls. | ||
41 | */ | ||
42 | |||
43 | /* | ||
44 | * Logic: | ||
45 | * - only on a boundary condition do we need to care. When we go | ||
46 | * from a negative count to a non-negative, we wake people up. | ||
47 | * - when we go from a non-negative count to a negative, we must | ||
48 | * (a) synchronize with the "sleeper" count and (b) make sure | ||
49 | * that we're on the wakeup list before we synchronize so that | ||
50 | * we cannot lose wakeup events. | ||
51 | */ | ||
52 | |||
53 | void __up(struct semaphore *sem) | ||
54 | { | ||
55 | wake_up(&sem->wait); | ||
56 | } | ||
57 | |||
58 | static DEFINE_SPINLOCK(semaphore_lock); | ||
59 | |||
60 | void __sched __down(struct semaphore * sem) | ||
61 | { | ||
62 | struct task_struct *tsk = current; | ||
63 | DECLARE_WAITQUEUE(wait, tsk); | ||
64 | tsk->state = TASK_UNINTERRUPTIBLE; | ||
65 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
66 | |||
67 | spin_lock_irq(&semaphore_lock); | ||
68 | sem->sleepers++; | ||
69 | for (;;) { | ||
70 | int sleepers = sem->sleepers; | ||
71 | |||
72 | /* | ||
73 | * Add "everybody else" into it. They aren't | ||
74 | * playing, because we own the spinlock. | ||
75 | */ | ||
76 | if (!atomic_add_negative(sleepers - 1, &sem->count)) { | ||
77 | sem->sleepers = 0; | ||
78 | break; | ||
79 | } | ||
80 | sem->sleepers = 1; /* us - see -1 above */ | ||
81 | spin_unlock_irq(&semaphore_lock); | ||
82 | |||
83 | schedule(); | ||
84 | tsk->state = TASK_UNINTERRUPTIBLE; | ||
85 | spin_lock_irq(&semaphore_lock); | ||
86 | } | ||
87 | spin_unlock_irq(&semaphore_lock); | ||
88 | remove_wait_queue(&sem->wait, &wait); | ||
89 | tsk->state = TASK_RUNNING; | ||
90 | wake_up(&sem->wait); | ||
91 | } | ||
92 | |||
93 | int __sched __down_interruptible(struct semaphore * sem) | ||
94 | { | ||
95 | int retval = 0; | ||
96 | struct task_struct *tsk = current; | ||
97 | DECLARE_WAITQUEUE(wait, tsk); | ||
98 | tsk->state = TASK_INTERRUPTIBLE; | ||
99 | add_wait_queue_exclusive(&sem->wait, &wait); | ||
100 | |||
101 | spin_lock_irq(&semaphore_lock); | ||
102 | sem->sleepers ++; | ||
103 | for (;;) { | ||
104 | int sleepers = sem->sleepers; | ||
105 | |||
106 | /* | ||
107 | * With signals pending, this turns into | ||
108 | * the trylock failure case - we won't be | ||
109 | * sleeping, and we can't get the lock as | ||
110 | * it has contention. Just correct the count | ||
111 | * and exit. | ||
112 | */ | ||
113 | if (signal_pending(current)) { | ||
114 | retval = -EINTR; | ||
115 | sem->sleepers = 0; | ||
116 | atomic_add(sleepers, &sem->count); | ||
117 | break; | ||
118 | } | ||
119 | |||
120 | /* | ||
121 | * Add "everybody else" into it. They aren't | ||
122 | * playing, because we own the spinlock. The | ||
123 | * "-1" is because we're still hoping to get | ||
124 | * the lock. | ||
125 | */ | ||
126 | if (!atomic_add_negative(sleepers - 1, &sem->count)) { | ||
127 | sem->sleepers = 0; | ||
128 | break; | ||
129 | } | ||
130 | sem->sleepers = 1; /* us - see -1 above */ | ||
131 | spin_unlock_irq(&semaphore_lock); | ||
132 | |||
133 | schedule(); | ||
134 | tsk->state = TASK_INTERRUPTIBLE; | ||
135 | spin_lock_irq(&semaphore_lock); | ||
136 | } | ||
137 | spin_unlock_irq(&semaphore_lock); | ||
138 | tsk->state = TASK_RUNNING; | ||
139 | remove_wait_queue(&sem->wait, &wait); | ||
140 | wake_up(&sem->wait); | ||
141 | return retval; | ||
142 | } | ||
143 | |||
144 | /* | ||
145 | * Trylock failed - make sure we correct for | ||
146 | * having decremented the count. | ||
147 | */ | ||
148 | int __down_trylock(struct semaphore * sem) | ||
149 | { | ||
150 | unsigned long flags; | ||
151 | int sleepers; | ||
152 | |||
153 | spin_lock_irqsave(&semaphore_lock, flags); | ||
154 | sleepers = sem->sleepers + 1; | ||
155 | sem->sleepers = 0; | ||
156 | |||
157 | /* | ||
158 | * Add "everybody else" and us into it. They aren't | ||
159 | * playing, because we own the spinlock. | ||
160 | */ | ||
161 | if (!atomic_add_negative(sleepers, &sem->count)) | ||
162 | wake_up(&sem->wait); | ||
163 | |||
164 | spin_unlock_irqrestore(&semaphore_lock, flags); | ||
165 | return 1; | ||
166 | } | ||
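
__down_trylock() above performs a repair rather than an acquire: the inline fast path has already decremented count, so on failure the slow path adds back its own decrement plus any recorded sleepers, waking the queue if that pushes count non-negative. As a hedged sketch (wake_queue is an illustrative stand-in for wake_up(&sem->wait)):

    #include <stdatomic.h>

    extern void wake_queue(void);

    /* called with the semaphore spinlock held; returns 1 = trylock failed */
    static int down_trylock_failed(atomic_int *count, int *sleepers)
    {
            int undo = *sleepers + 1;       /* everyone's decrements, plus ours */

            *sleepers = 0;
            if (atomic_fetch_add(count, undo) + undo >= 0)
                    wake_queue();           /* result non-negative: hand it on */
            return 1;
    }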
diff --git a/arch/v850/kernel/v850_ksyms.c b/arch/v850/kernel/v850_ksyms.c index 93575fdc874d..8d386a5dbc4a 100644 --- a/arch/v850/kernel/v850_ksyms.c +++ b/arch/v850/kernel/v850_ksyms.c | |||
@@ -11,7 +11,6 @@ | |||
11 | #include <asm/pgalloc.h> | 11 | #include <asm/pgalloc.h> |
12 | #include <asm/irq.h> | 12 | #include <asm/irq.h> |
13 | #include <asm/io.h> | 13 | #include <asm/io.h> |
14 | #include <asm/semaphore.h> | ||
15 | #include <asm/checksum.h> | 14 | #include <asm/checksum.h> |
16 | #include <asm/current.h> | 15 | #include <asm/current.h> |
17 | 16 | ||
@@ -34,12 +33,6 @@ EXPORT_SYMBOL (memset); | |||
34 | EXPORT_SYMBOL (memcpy); | 33 | EXPORT_SYMBOL (memcpy); |
35 | EXPORT_SYMBOL (memmove); | 34 | EXPORT_SYMBOL (memmove); |
36 | 35 | ||
37 | /* semaphores */ | ||
38 | EXPORT_SYMBOL (__down); | ||
39 | EXPORT_SYMBOL (__down_interruptible); | ||
40 | EXPORT_SYMBOL (__down_trylock); | ||
41 | EXPORT_SYMBOL (__up); | ||
42 | |||
43 | /* | 36 | /* |
44 | * libgcc functions - functions that are used internally by the | 37 | * libgcc functions - functions that are used internally by the |
45 | * compiler... (prototypes are not correct though, but that | 38 | * compiler... (prototypes are not correct though, but that |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 6c70fed0f9a0..e4b38861ea52 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -53,9 +53,6 @@ config STACKTRACE_SUPPORT | |||
53 | config HAVE_LATENCYTOP_SUPPORT | 53 | config HAVE_LATENCYTOP_SUPPORT |
54 | def_bool y | 54 | def_bool y |
55 | 55 | ||
56 | config SEMAPHORE_SLEEPERS | ||
57 | def_bool y | ||
58 | |||
59 | config FAST_CMPXCHG_LOCAL | 56 | config FAST_CMPXCHG_LOCAL |
60 | bool | 57 | bool |
61 | default y | 58 | default y |
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c index 061627806a2d..deb43785e923 100644 --- a/arch/x86/kernel/i386_ksyms_32.c +++ b/arch/x86/kernel/i386_ksyms_32.c | |||
@@ -1,13 +1,8 @@ | |||
1 | #include <linux/module.h> | 1 | #include <linux/module.h> |
2 | #include <asm/semaphore.h> | ||
3 | #include <asm/checksum.h> | 2 | #include <asm/checksum.h> |
4 | #include <asm/desc.h> | 3 | #include <asm/desc.h> |
5 | #include <asm/pgtable.h> | 4 | #include <asm/pgtable.h> |
6 | 5 | ||
7 | EXPORT_SYMBOL(__down_failed); | ||
8 | EXPORT_SYMBOL(__down_failed_interruptible); | ||
9 | EXPORT_SYMBOL(__down_failed_trylock); | ||
10 | EXPORT_SYMBOL(__up_wakeup); | ||
11 | /* Networking helper routines. */ | 6 | /* Networking helper routines. */ |
12 | EXPORT_SYMBOL(csum_partial_copy_generic); | 7 | EXPORT_SYMBOL(csum_partial_copy_generic); |
13 | 8 | ||
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c index a66e9c1a0537..95a993e18165 100644 --- a/arch/x86/kernel/x8664_ksyms_64.c +++ b/arch/x86/kernel/x8664_ksyms_64.c | |||
@@ -4,7 +4,6 @@ | |||
4 | #include <linux/module.h> | 4 | #include <linux/module.h> |
5 | #include <linux/smp.h> | 5 | #include <linux/smp.h> |
6 | 6 | ||
7 | #include <asm/semaphore.h> | ||
8 | #include <asm/processor.h> | 7 | #include <asm/processor.h> |
9 | #include <asm/uaccess.h> | 8 | #include <asm/uaccess.h> |
10 | #include <asm/pgtable.h> | 9 | #include <asm/pgtable.h> |
@@ -12,11 +11,6 @@ | |||
12 | 11 | ||
13 | EXPORT_SYMBOL(kernel_thread); | 12 | EXPORT_SYMBOL(kernel_thread); |
14 | 13 | ||
15 | EXPORT_SYMBOL(__down_failed); | ||
16 | EXPORT_SYMBOL(__down_failed_interruptible); | ||
17 | EXPORT_SYMBOL(__down_failed_trylock); | ||
18 | EXPORT_SYMBOL(__up_wakeup); | ||
19 | |||
20 | EXPORT_SYMBOL(__get_user_1); | 14 | EXPORT_SYMBOL(__get_user_1); |
21 | EXPORT_SYMBOL(__get_user_2); | 15 | EXPORT_SYMBOL(__get_user_2); |
22 | EXPORT_SYMBOL(__get_user_4); | 16 | EXPORT_SYMBOL(__get_user_4); |
diff --git a/arch/x86/lib/semaphore_32.S b/arch/x86/lib/semaphore_32.S index 3899bd37fdf0..648fe4741782 100644 --- a/arch/x86/lib/semaphore_32.S +++ b/arch/x86/lib/semaphore_32.S | |||
@@ -30,89 +30,6 @@ | |||
30 | * value or just clobbered.. | 30 | * value or just clobbered.. |
31 | */ | 31 | */ |
32 | .section .sched.text, "ax" | 32 | .section .sched.text, "ax" |
33 | ENTRY(__down_failed) | ||
34 | CFI_STARTPROC | ||
35 | FRAME | ||
36 | pushl %edx | ||
37 | CFI_ADJUST_CFA_OFFSET 4 | ||
38 | CFI_REL_OFFSET edx,0 | ||
39 | pushl %ecx | ||
40 | CFI_ADJUST_CFA_OFFSET 4 | ||
41 | CFI_REL_OFFSET ecx,0 | ||
42 | call __down | ||
43 | popl %ecx | ||
44 | CFI_ADJUST_CFA_OFFSET -4 | ||
45 | CFI_RESTORE ecx | ||
46 | popl %edx | ||
47 | CFI_ADJUST_CFA_OFFSET -4 | ||
48 | CFI_RESTORE edx | ||
49 | ENDFRAME | ||
50 | ret | ||
51 | CFI_ENDPROC | ||
52 | ENDPROC(__down_failed) | ||
53 | |||
54 | ENTRY(__down_failed_interruptible) | ||
55 | CFI_STARTPROC | ||
56 | FRAME | ||
57 | pushl %edx | ||
58 | CFI_ADJUST_CFA_OFFSET 4 | ||
59 | CFI_REL_OFFSET edx,0 | ||
60 | pushl %ecx | ||
61 | CFI_ADJUST_CFA_OFFSET 4 | ||
62 | CFI_REL_OFFSET ecx,0 | ||
63 | call __down_interruptible | ||
64 | popl %ecx | ||
65 | CFI_ADJUST_CFA_OFFSET -4 | ||
66 | CFI_RESTORE ecx | ||
67 | popl %edx | ||
68 | CFI_ADJUST_CFA_OFFSET -4 | ||
69 | CFI_RESTORE edx | ||
70 | ENDFRAME | ||
71 | ret | ||
72 | CFI_ENDPROC | ||
73 | ENDPROC(__down_failed_interruptible) | ||
74 | |||
75 | ENTRY(__down_failed_trylock) | ||
76 | CFI_STARTPROC | ||
77 | FRAME | ||
78 | pushl %edx | ||
79 | CFI_ADJUST_CFA_OFFSET 4 | ||
80 | CFI_REL_OFFSET edx,0 | ||
81 | pushl %ecx | ||
82 | CFI_ADJUST_CFA_OFFSET 4 | ||
83 | CFI_REL_OFFSET ecx,0 | ||
84 | call __down_trylock | ||
85 | popl %ecx | ||
86 | CFI_ADJUST_CFA_OFFSET -4 | ||
87 | CFI_RESTORE ecx | ||
88 | popl %edx | ||
89 | CFI_ADJUST_CFA_OFFSET -4 | ||
90 | CFI_RESTORE edx | ||
91 | ENDFRAME | ||
92 | ret | ||
93 | CFI_ENDPROC | ||
94 | ENDPROC(__down_failed_trylock) | ||
95 | |||
96 | ENTRY(__up_wakeup) | ||
97 | CFI_STARTPROC | ||
98 | FRAME | ||
99 | pushl %edx | ||
100 | CFI_ADJUST_CFA_OFFSET 4 | ||
101 | CFI_REL_OFFSET edx,0 | ||
102 | pushl %ecx | ||
103 | CFI_ADJUST_CFA_OFFSET 4 | ||
104 | CFI_REL_OFFSET ecx,0 | ||
105 | call __up | ||
106 | popl %ecx | ||
107 | CFI_ADJUST_CFA_OFFSET -4 | ||
108 | CFI_RESTORE ecx | ||
109 | popl %edx | ||
110 | CFI_ADJUST_CFA_OFFSET -4 | ||
111 | CFI_RESTORE edx | ||
112 | ENDFRAME | ||
113 | ret | ||
114 | CFI_ENDPROC | ||
115 | ENDPROC(__up_wakeup) | ||
116 | 33 | ||
117 | /* | 34 | /* |
118 | * rw spinlock fallbacks | 35 | * rw spinlock fallbacks |
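
The stubs deleted above existed because the i386 fast path was inline assembly (a locked decrement plus a conditional jump) that did not follow the C calling convention; __down_failed and friends saved the caller-clobbered registers before forwarding to the C slow path, which is all the pushl/popl pairs do. Stripped of that register juggling, the split is roughly this hedged sketch (down_slow stands in for __down):

    #include <stdatomic.h>

    extern void down_slow(atomic_int *count);

    /* inline fast path: LOCK DECL + JS __down_failed, minus the asm */
    static inline void down_fast(atomic_int *count)
    {
            if (atomic_fetch_sub(count, 1) <= 0)    /* went negative: contended */
                    down_slow(count);
    }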
diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S index 8b92d428ab02..e009251d4e9f 100644 --- a/arch/x86/lib/thunk_64.S +++ b/arch/x86/lib/thunk_64.S | |||
@@ -41,11 +41,6 @@ | |||
41 | thunk rwsem_downgrade_thunk,rwsem_downgrade_wake | 41 | thunk rwsem_downgrade_thunk,rwsem_downgrade_wake |
42 | #endif | 42 | #endif |
43 | 43 | ||
44 | thunk __down_failed,__down | ||
45 | thunk_retrax __down_failed_interruptible,__down_interruptible | ||
46 | thunk_retrax __down_failed_trylock,__down_trylock | ||
47 | thunk __up_wakeup,__up | ||
48 | |||
49 | #ifdef CONFIG_TRACE_IRQFLAGS | 44 | #ifdef CONFIG_TRACE_IRQFLAGS |
50 | thunk trace_hardirqs_on_thunk,trace_hardirqs_on | 45 | thunk trace_hardirqs_on_thunk,trace_hardirqs_on |
51 | thunk trace_hardirqs_off_thunk,trace_hardirqs_off | 46 | thunk trace_hardirqs_off_thunk,trace_hardirqs_off |
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile index f582d6a24ec2..7419dbccf027 100644 --- a/arch/xtensa/kernel/Makefile +++ b/arch/xtensa/kernel/Makefile | |||
@@ -5,7 +5,7 @@ | |||
5 | extra-y := head.o vmlinux.lds | 5 | extra-y := head.o vmlinux.lds |
6 | 6 | ||
7 | 7 | ||
8 | obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o semaphore.o \ | 8 | obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o \ |
9 | setup.o signal.o syscall.o time.o traps.o vectors.o platform.o \ | 9 | setup.o signal.o syscall.o time.o traps.o vectors.o platform.o \ |
10 | pci-dma.o init_task.o io.o | 10 | pci-dma.o init_task.o io.o |
11 | 11 | ||
diff --git a/arch/xtensa/kernel/semaphore.c b/arch/xtensa/kernel/semaphore.c deleted file mode 100644 index 995c6410ae10..000000000000 --- a/arch/xtensa/kernel/semaphore.c +++ /dev/null | |||
@@ -1,226 +0,0 @@ | |||
1 | /* | ||
2 | * arch/xtensa/kernel/semaphore.c | ||
3 | * | ||
4 | * Generic semaphore code. Buyer beware. Do your own specific changes | ||
5 | * in <asm/semaphore-helper.h> | ||
6 | * | ||
7 | * This file is subject to the terms and conditions of the GNU General Public | ||
8 | * License. See the file "COPYING" in the main directory of this archive | ||
9 | * for more details. | ||
10 | * | ||
11 | * Copyright (C) 2001 - 2005 Tensilica Inc. | ||
12 | * | ||
13 | * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com> | ||
14 | * Chris Zankel <chris@zankel.net> | ||
15 | * Marc Gauthier<marc@tensilica.com, marc@alumni.uwaterloo.ca> | ||
16 | * Kevin Chea | ||
17 | */ | ||
18 | |||
19 | #include <linux/sched.h> | ||
20 | #include <linux/wait.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <asm/semaphore.h> | ||
23 | #include <asm/errno.h> | ||
24 | |||
25 | /* | ||
26 | * These two _must_ execute atomically wrt each other. | ||
27 | */ | ||
28 | |||
29 | static __inline__ void wake_one_more(struct semaphore * sem) | ||
30 | { | ||
31 | atomic_inc((atomic_t *)&sem->sleepers); | ||
32 | } | ||
33 | |||
34 | static __inline__ int waking_non_zero(struct semaphore *sem) | ||
35 | { | ||
36 | unsigned long flags; | ||
37 | int ret = 0; | ||
38 | |||
39 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
40 | if (sem->sleepers > 0) { | ||
41 | sem->sleepers--; | ||
42 | ret = 1; | ||
43 | } | ||
44 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
45 | return ret; | ||
46 | } | ||
47 | |||
48 | /* | ||
49 | * waking_non_zero_interruptible: | ||
50 | * 1 got the lock | ||
51 | * 0 go to sleep | ||
52 | * -EINTR interrupted | ||
53 | * | ||
54 | * We must undo the sem->count down_interruptible() increment while we are | ||
55 | * protected by the spinlock in order to make this atomic_inc() atomic with the | ||
56 | * atomic_read() in wake_one_more(), otherwise we can race. -arca | ||
57 | */ | ||
58 | |||
59 | static __inline__ int waking_non_zero_interruptible(struct semaphore *sem, | ||
60 | struct task_struct *tsk) | ||
61 | { | ||
62 | unsigned long flags; | ||
63 | int ret = 0; | ||
64 | |||
65 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
66 | if (sem->sleepers > 0) { | ||
67 | sem->sleepers--; | ||
68 | ret = 1; | ||
69 | } else if (signal_pending(tsk)) { | ||
70 | atomic_inc(&sem->count); | ||
71 | ret = -EINTR; | ||
72 | } | ||
73 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
74 | return ret; | ||
75 | } | ||
76 | |||
77 | /* | ||
78 | * waking_non_zero_trylock: | ||
79 | * 1 failed to lock | ||
80 | * 0 got the lock | ||
81 | * | ||
82 | * We must undo the sem->count down_trylock() increment while we are | ||
83 | * protected by the spinlock in order to make this atomic_inc() atomic with the | ||
84 | * atomic_read() in wake_one_more(), otherwise we can race. -arca | ||
85 | */ | ||
86 | |||
87 | static __inline__ int waking_non_zero_trylock(struct semaphore *sem) | ||
88 | { | ||
89 | unsigned long flags; | ||
90 | int ret = 1; | ||
91 | |||
92 | spin_lock_irqsave(&semaphore_wake_lock, flags); | ||
93 | if (sem->sleepers <= 0) | ||
94 | atomic_inc(&sem->count); | ||
95 | else { | ||
96 | sem->sleepers--; | ||
97 | ret = 0; | ||
98 | } | ||
99 | spin_unlock_irqrestore(&semaphore_wake_lock, flags); | ||
100 | return ret; | ||
101 | } | ||
102 | |||
103 | DEFINE_SPINLOCK(semaphore_wake_lock); | ||
104 | |||
105 | /* | ||
106 | * Semaphores are implemented using a two-way counter: | ||
107 | * The "count" variable is decremented for each process | ||
108 | * that tries to sleep, while the "waking" variable is | ||
109 | * incremented when the "up()" code goes to wake up waiting | ||
110 | * processes. | ||
111 | * | ||
112 | * Notably, the inline "up()" and "down()" functions can | ||
113 | * efficiently test if they need to do any extra work (up | ||
114 | * needs to do something only if count was negative before | ||
115 | * the increment operation). | ||
116 | * | ||
117 | * waking_non_zero() (from asm/semaphore.h) must execute | ||
118 | * atomically. | ||
119 | * | ||
120 | * When __up() is called, the count was negative before | ||
121 | * incrementing it, and we need to wake up somebody. | ||
122 | * | ||
123 | * This routine adds one to the count of processes that need to | ||
124 | * wake up and exit. ALL waiting processes actually wake up but | ||
125 | * only the one that gets to the "waking" field first will gate | ||
126 | * through and acquire the semaphore. The others will go back | ||
127 | * to sleep. | ||
128 | * | ||
129 | * Note that these functions are only called when there is | ||
130 | * contention on the lock, and as such all this is the | ||
131 | * "non-critical" part of the whole semaphore business. The | ||
132 | * critical part is the inline stuff in <asm/semaphore.h> | ||
133 | * where we want to avoid any extra jumps and calls. | ||
134 | */ | ||
135 | |||
136 | void __up(struct semaphore *sem) | ||
137 | { | ||
138 | wake_one_more(sem); | ||
139 | wake_up(&sem->wait); | ||
140 | } | ||
141 | |||
142 | /* | ||
143 | * Perform the "down" function. Return zero for semaphore acquired, | ||
144 | * return negative if we were signalled out of the function. | ||
145 | * | ||
146 | * If called from __down, the return is ignored and the wait loop is | ||
147 | * not interruptible. This means that a task waiting on a semaphore | ||
148 | * using "down()" cannot be killed until someone does an "up()" on | ||
149 | * the semaphore. | ||
150 | * | ||
151 | * If called from __down_interruptible, the return value gets checked | ||
152 | * upon return. If the return value is negative then the task continues | ||
153 | * with the negative value in the return register (it can be tested by | ||
154 | * the caller). | ||
155 | * | ||
156 | * Either form may be used in conjunction with "up()". | ||
157 | * | ||
158 | */ | ||
159 | |||
160 | #define DOWN_VAR \ | ||
161 | struct task_struct *tsk = current; \ | ||
162 | wait_queue_t wait; \ | ||
163 | init_waitqueue_entry(&wait, tsk); | ||
164 | |||
165 | #define DOWN_HEAD(task_state) \ | ||
166 | \ | ||
167 | \ | ||
168 | tsk->state = (task_state); \ | ||
169 | add_wait_queue(&sem->wait, &wait); \ | ||
170 | \ | ||
171 | /* \ | ||
172 | * Ok, we're set up. sem->count is known to be less than zero \ | ||
173 | * so we must wait. \ | ||
174 | * \ | ||
175 | * We can let go of the lock for purposes of waiting. \ | ||
176 | * We re-acquire it after awaking so as to protect \ | ||
177 | * all semaphore operations. \ | ||
178 | * \ | ||
179 | * If "up()" is called before we call waking_non_zero() then \ | ||
180 | * we will catch it right away. If it is called later then \ | ||
181 | * we will have to go through a wakeup cycle to catch it. \ | ||
182 | * \ | ||
183 | * Multiple waiters contend for the semaphore lock to see \ | ||
184 | * who gets to gate through and who has to wait some more. \ | ||
185 | */ \ | ||
186 | for (;;) { | ||
187 | |||
188 | #define DOWN_TAIL(task_state) \ | ||
189 | tsk->state = (task_state); \ | ||
190 | } \ | ||
191 | tsk->state = TASK_RUNNING; \ | ||
192 | remove_wait_queue(&sem->wait, &wait); | ||
193 | |||
194 | void __sched __down(struct semaphore * sem) | ||
195 | { | ||
196 | DOWN_VAR | ||
197 | DOWN_HEAD(TASK_UNINTERRUPTIBLE) | ||
198 | if (waking_non_zero(sem)) | ||
199 | break; | ||
200 | schedule(); | ||
201 | DOWN_TAIL(TASK_UNINTERRUPTIBLE) | ||
202 | } | ||
203 | |||
204 | int __sched __down_interruptible(struct semaphore * sem) | ||
205 | { | ||
206 | int ret = 0; | ||
207 | DOWN_VAR | ||
208 | DOWN_HEAD(TASK_INTERRUPTIBLE) | ||
209 | |||
210 | ret = waking_non_zero_interruptible(sem, tsk); | ||
211 | if (ret) | ||
212 | { | ||
213 | if (ret == 1) | ||
214 | /* ret != 0 only if we get interrupted -arca */ | ||
215 | ret = 0; | ||
216 | break; | ||
217 | } | ||
218 | schedule(); | ||
219 | DOWN_TAIL(TASK_INTERRUPTIBLE) | ||
220 | return ret; | ||
221 | } | ||
222 | |||
223 | int __down_trylock(struct semaphore * sem) | ||
224 | { | ||
225 | return waking_non_zero_trylock(sem); | ||
226 | } | ||
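
waking_non_zero_interruptible() in the file above is the signal-aware variant of the credit gate: consume a banked wakeup if one exists, otherwise undo our earlier count decrement when a signal is pending so the bookkeeping stays balanced. A compact hedged sketch with the kernel specifics stubbed out (signal_pending here is a hypothetical stand-in for the real check):

    #include <errno.h>
    #include <stdatomic.h>

    extern int signal_pending(void);

    /* called with semaphore_wake_lock held; 1 = got it, 0 = sleep, -EINTR = abort */
    static int gate_interruptible(int *sleepers, atomic_int *count)
    {
            if (*sleepers > 0) {
                    (*sleepers)--;
                    return 1;
            }
            if (signal_pending()) {
                    atomic_fetch_add(count, 1);     /* undo down()'s decrement */
                    return -EINTR;
            }
            return 0;
    }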
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c index 60dbdb43fb4c..6e52cdd6166f 100644 --- a/arch/xtensa/kernel/xtensa_ksyms.c +++ b/arch/xtensa/kernel/xtensa_ksyms.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <asm/io.h> | 26 | #include <asm/io.h> |
27 | #include <asm/page.h> | 27 | #include <asm/page.h> |
28 | #include <asm/pgalloc.h> | 28 | #include <asm/pgalloc.h> |
29 | #include <asm/semaphore.h> | ||
30 | #ifdef CONFIG_BLK_DEV_FD | 29 | #ifdef CONFIG_BLK_DEV_FD |
31 | #include <asm/floppy.h> | 30 | #include <asm/floppy.h> |
32 | #endif | 31 | #endif |
@@ -71,14 +70,6 @@ EXPORT_SYMBOL(__umodsi3); | |||
71 | EXPORT_SYMBOL(__udivdi3); | 70 | EXPORT_SYMBOL(__udivdi3); |
72 | EXPORT_SYMBOL(__umoddi3); | 71 | EXPORT_SYMBOL(__umoddi3); |
73 | 72 | ||
74 | /* | ||
75 | * Semaphore operations | ||
76 | */ | ||
77 | EXPORT_SYMBOL(__down); | ||
78 | EXPORT_SYMBOL(__down_interruptible); | ||
79 | EXPORT_SYMBOL(__down_trylock); | ||
80 | EXPORT_SYMBOL(__up); | ||
81 | |||
82 | #ifdef CONFIG_NET | 73 | #ifdef CONFIG_NET |
83 | /* | 74 | /* |
84 | * Networking support | 75 | * Networking support |