path: root/arch/ia64
author     Matthew Wilcox <matthew@wil.cx>          2008-03-07 21:55:58 -0500
committer  Matthew Wilcox <willy@linux.intel.com>   2008-04-17 10:42:34 -0400
commit     64ac24e738823161693bf791f87adc802cf529ff (patch)
tree       19c0b0cf314d4394ca580c05b86cdf874ce0a167 /arch/ia64
parent     e48b3deee475134585eed03e7afebe4bf9e0dba9 (diff)
Generic semaphore implementation
Semaphores are no longer performance-critical, so a generic C implementation is better for maintainability, debuggability and extensibility.

Thanks to Peter Zijlstra for fixing the lockdep warning. Thanks to Harvey Harrison for pointing out that the unlikely() was unnecessary.

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
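[Editor's note] The arch/ia64 hunks below only delete the architecture-specific semaphore code; the generic replacement lives in files outside this diffstat. As a rough illustration of what "a generic C implementation" of a counting semaphore looks like, here is a minimal userspace sketch: a count guarded by a lock, with contended callers sleeping until up() makes a unit available. The names (struct sema, sema_down, sema_up) and the use of pthreads are assumptions for illustration only; this is not the code added by this commit.

/*
 * Userspace sketch of a generic counting semaphore: a plain counter
 * protected by a lock, with waiters blocking on a condition variable.
 * Illustration only -- the kernel version uses a raw spinlock and an
 * explicit waiter list rather than pthreads.
 */
#include <pthread.h>

struct sema {
        pthread_mutex_t lock;
        pthread_cond_t  wait;
        unsigned int    count;
};

static void sema_init(struct sema *s, unsigned int val)
{
        pthread_mutex_init(&s->lock, NULL);
        pthread_cond_init(&s->wait, NULL);
        s->count = val;
}

static void sema_down(struct sema *s)
{
        pthread_mutex_lock(&s->lock);
        while (s->count == 0)                   /* sleep until a unit is available */
                pthread_cond_wait(&s->wait, &s->lock);
        s->count--;
        pthread_mutex_unlock(&s->lock);
}

static void sema_up(struct sema *s)
{
        pthread_mutex_lock(&s->lock);
        s->count++;
        pthread_cond_signal(&s->wait);          /* wake one sleeper, if any */
        pthread_mutex_unlock(&s->lock);
}

int main(void)
{
        struct sema s;

        sema_init(&s, 1);       /* binary semaphore, initially free */
        sema_down(&s);          /* acquire */
        sema_up(&s);            /* release */
        return 0;
}

Because every operation takes an ordinary lock, there is nothing architecture-specific left to maintain, which is the trade-off the commit message argues for: slightly more overhead on a path that is no longer performance-critical, in exchange for one debuggable implementation.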
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/kernel/Makefile        2
-rw-r--r--  arch/ia64/kernel/ia64_ksyms.c    6
-rw-r--r--  arch/ia64/kernel/semaphore.c   165
3 files changed, 1 insertion(+), 172 deletions(-)
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 33e5a598672d..13fd10e8699e 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -6,7 +6,7 @@ extra-y := head.o init_task.o vmlinux.lds
 
 obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \
         irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \
-        salinfo.o semaphore.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
+        salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
         unwind.o mca.o mca_asm.o topology.o
 
 obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 8e7193d55528..6da1f20d7372 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -19,12 +19,6 @@ EXPORT_SYMBOL_GPL(empty_zero_page);
 EXPORT_SYMBOL(ip_fast_csum);            /* hand-coded assembly */
 EXPORT_SYMBOL(csum_ipv6_magic);
 
-#include <asm/semaphore.h>
-EXPORT_SYMBOL(__down);
-EXPORT_SYMBOL(__down_interruptible);
-EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(__up);
-
 #include <asm/page.h>
 EXPORT_SYMBOL(clear_page);
 
diff --git a/arch/ia64/kernel/semaphore.c b/arch/ia64/kernel/semaphore.c
deleted file mode 100644
index 2724ef3fbae2..000000000000
--- a/arch/ia64/kernel/semaphore.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * IA-64 semaphore implementation (derived from x86 version).
- *
- * Copyright (C) 1999-2000, 2002 Hewlett-Packard Co
- *      David Mosberger-Tang <davidm@hpl.hp.com>
- */
-
-/*
- * Semaphores are implemented using a two-way counter: The "count"
- * variable is decremented for each process that tries to acquire the
- * semaphore, while the "sleepers" variable is a count of such
- * acquires.
- *
- * Notably, the inline "up()" and "down()" functions can efficiently
- * test if they need to do any extra work (up needs to do something
- * only if count was negative before the increment operation.
- *
- * "sleeping" and the contention routine ordering is protected
- * by the spinlock in the semaphore's waitqueue head.
- *
- * Note that these functions are only called when there is contention
- * on the lock, and as such all this is the "non-critical" part of the
- * whole semaphore business. The critical part is the inline stuff in
- * <asm/semaphore.h> where we want to avoid any extra jumps and calls.
- */
-#include <linux/sched.h>
-#include <linux/init.h>
-
-#include <asm/errno.h>
-#include <asm/semaphore.h>
-
-/*
- * Logic:
- *  - Only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - When we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleepers" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-void
-__up (struct semaphore *sem)
-{
-        wake_up(&sem->wait);
-}
-
-void __sched __down (struct semaphore *sem)
-{
-        struct task_struct *tsk = current;
-        DECLARE_WAITQUEUE(wait, tsk);
-        unsigned long flags;
-
-        tsk->state = TASK_UNINTERRUPTIBLE;
-        spin_lock_irqsave(&sem->wait.lock, flags);
-        add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-        sem->sleepers++;
-        for (;;) {
-                int sleepers = sem->sleepers;
-
-                /*
-                 * Add "everybody else" into it. They aren't
-                 * playing, because we own the spinlock in
-                 * the wait_queue_head.
-                 */
-                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-                        sem->sleepers = 0;
-                        break;
-                }
-                sem->sleepers = 1;      /* us - see -1 above */
-                spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-                schedule();
-
-                spin_lock_irqsave(&sem->wait.lock, flags);
-                tsk->state = TASK_UNINTERRUPTIBLE;
-        }
-        remove_wait_queue_locked(&sem->wait, &wait);
-        wake_up_locked(&sem->wait);
-        spin_unlock_irqrestore(&sem->wait.lock, flags);
-        tsk->state = TASK_RUNNING;
-}
-
-int __sched __down_interruptible (struct semaphore * sem)
-{
-        int retval = 0;
-        struct task_struct *tsk = current;
-        DECLARE_WAITQUEUE(wait, tsk);
-        unsigned long flags;
-
-        tsk->state = TASK_INTERRUPTIBLE;
-        spin_lock_irqsave(&sem->wait.lock, flags);
-        add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-        sem->sleepers ++;
-        for (;;) {
-                int sleepers = sem->sleepers;
-
-                /*
-                 * With signals pending, this turns into
-                 * the trylock failure case - we won't be
-                 * sleeping, and we can't get the lock as
-                 * it has contention. Just correct the count
-                 * and exit.
-                 */
-                if (signal_pending(current)) {
-                        retval = -EINTR;
-                        sem->sleepers = 0;
-                        atomic_add(sleepers, &sem->count);
-                        break;
-                }
-
-                /*
-                 * Add "everybody else" into it. They aren't
-                 * playing, because we own the spinlock in
-                 * wait_queue_head. The "-1" is because we're
-                 * still hoping to get the semaphore.
-                 */
-                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-                        sem->sleepers = 0;
-                        break;
-                }
-                sem->sleepers = 1;      /* us - see -1 above */
-                spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-                schedule();
-
-                spin_lock_irqsave(&sem->wait.lock, flags);
-                tsk->state = TASK_INTERRUPTIBLE;
-        }
-        remove_wait_queue_locked(&sem->wait, &wait);
-        wake_up_locked(&sem->wait);
-        spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-        tsk->state = TASK_RUNNING;
-        return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for having decremented the
- * count.
- */
-int
-__down_trylock (struct semaphore *sem)
-{
-        unsigned long flags;
-        int sleepers;
-
-        spin_lock_irqsave(&sem->wait.lock, flags);
-        sleepers = sem->sleepers + 1;
-        sem->sleepers = 0;
-
-        /*
-         * Add "everybody else" and us into it. They aren't
-         * playing, because we own the spinlock in the
-         * wait_queue_head.
-         */
-        if (!atomic_add_negative(sleepers, &sem->count)) {
-                wake_up_locked(&sem->wait);
-        }
-
-        spin_unlock_irqrestore(&sem->wait.lock, flags);
-        return 1;
-}
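
[Editor's note] The comments in the deleted file describe a "two-way counter": the fast path decrements count, while sleepers records how many tasks have already done that decrement and entered the slow path; each waiter then folds sleepers - 1 back into count with atomic_add_negative() to decide whether it actually owns the semaphore. The following is a minimal single-threaded model of that bookkeeping, under stated assumptions: plain userspace C, no real atomics, spinlocks or waitqueues, and atomic_add_negative() re-implemented as an ordinary helper purely for illustration.

/*
 * Minimal model of the count/sleepers bookkeeping in the deleted
 * IA-64 __down() slow path.  Illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

static int count;     /* fast-path counter: down() decrements, up() increments */
static int sleepers;  /* tasks that have decremented count and gone to sleep */

/* Adds i to *v and reports whether the result went negative. */
static bool atomic_add_negative(int i, int *v)
{
        *v += i;
        return *v < 0;
}

/* One pass of the __down() loop body (waitqueue lock assumed held). */
static bool try_to_acquire(void)
{
        /*
         * Fold "everybody else" back into count.  If the result is
         * non-negative, this task owns the semaphore and sleepers is
         * reset; otherwise it stays accounted as the one sleeper and
         * would have to schedule() again.
         */
        if (!atomic_add_negative(sleepers - 1, &count)) {
                sleepers = 0;
                return true;
        }
        sleepers = 1;
        return false;
}

int main(void)
{
        count = 1;                 /* free semaphore */
        count--;                   /* task A: fast-path down(), count == 0, acquired */
        count--;                   /* task B: fast-path down(), count == -1, slow path */
        sleepers++;                /* B enters __down() */
        printf("B acquires now? %s\n", try_to_acquire() ? "yes" : "no");
        count++;                   /* task A releases: up() raises count back to 0 */
        printf("B acquires after up()? %s\n", try_to_acquire() ? "yes" : "no");
        return 0;
}

Run, the model prints "no" for the first attempt (B must sleep while A holds the semaphore) and "yes" after A's up() has brought count back to zero, which is the boundary condition the "Logic:" comment describes.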