author	Benjamin LaHaise <bcrl@kvack.org>	2005-09-03 18:56:52 -0400
committer	Linus Torvalds <torvalds@evo.osdl.org>	2005-09-05 03:06:14 -0400
commit	52fdd08903a1d1162e184114837e232640191627 (patch)
tree	9469f521b7ba12ac48337155bc5a65049d361229 /arch/i386/kernel
parent	4ad8d38342430f8b52f7a8458dce90caf8c8ca64 (diff)
[PATCH] unify x86/x86-64 semaphore code
This patch moves the common code in x86 and x86-64's semaphore.c into a
single file in lib/semaphore-sleepers.c. The arch specific asm stubs are
left in the arch tree (in semaphore.c for i386 and in the asm for x86-64).
There should be no changes in code/functionality with this patch.
Signed-off-by: Benjamin LaHaise <benjamin.c.lahaise@intel.com>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Jeff Dike <jdike@addtoit.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
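
For orientation, a rough sketch of the layout after this patch. It is illustrative only, not the literal post-patch sources: the prototypes below match the definitions deleted from arch/i386/kernel/semaphore.c in the diff further down, and the stub names are the asm entry points that the inline fast paths in <asm/semaphore.h> branch to.

/* Sketch of the post-patch split -- illustrative, not the actual files. */

/* lib/semaphore-sleepers.c: the shared C contention paths, i.e. the
 * functions this patch removes from arch/i386/kernel/semaphore.c below. */
fastcall void __up(struct semaphore *sem);
fastcall void __sched __down(struct semaphore *sem);
fastcall int __sched __down_interruptible(struct semaphore *sem);
fastcall int __down_trylock(struct semaphore *sem);

/* arch/i386/kernel/semaphore.c: only the register-preserving asm stubs
 * (__down_failed, __down_failed_interruptible, __down_failed_trylock,
 * __up_wakeup) remain; each saves the registers the inline fast path
 * leaves live and then calls the matching C function above. */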
Diffstat (limited to 'arch/i386/kernel')
-rw-r--r--	arch/i386/kernel/semaphore.c	162
1 files changed, 0 insertions, 162 deletions
diff --git a/arch/i386/kernel/semaphore.c b/arch/i386/kernel/semaphore.c
index 469f496e55c0..7455ab643943 100644
--- a/arch/i386/kernel/semaphore.c
+++ b/arch/i386/kernel/semaphore.c
@@ -13,171 +13,9 @@
  * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
  */
 #include <linux/config.h>
-#include <linux/sched.h>
-#include <linux/err.h>
-#include <linux/init.h>
 #include <asm/semaphore.h>
 
 /*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to acquire the semaphore, while the "sleeping"
- * variable is a count of such acquires.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * "sleeping" and the contention routine ordering is protected
- * by the spinlock in the semaphore's waitqueue head.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-
-/*
- * Logic:
- *  - only on a boundary condition do we need to care. When we go
- *    from a negative count to a non-negative, we wake people up.
- *  - when we go from a non-negative count to a negative do we
- *    (a) synchronize with the "sleeper" count and (b) make sure
- *    that we're on the wakeup list before we synchronize so that
- *    we cannot lose wakeup events.
- */
-
-static fastcall void __attribute_used__ __up(struct semaphore *sem)
-{
-	wake_up(&sem->wait);
-}
-
-static fastcall void __attribute_used__ __sched __down(struct semaphore * sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	unsigned long flags;
-
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock in
-		 * the wait_queue_head.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-		schedule();
-
-		spin_lock_irqsave(&sem->wait.lock, flags);
-		tsk->state = TASK_UNINTERRUPTIBLE;
-	}
-	remove_wait_queue_locked(&sem->wait, &wait);
-	wake_up_locked(&sem->wait);
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-	tsk->state = TASK_RUNNING;
-}
-
-static fastcall int __attribute_used__ __sched __down_interruptible(struct semaphore * sem)
-{
-	int retval = 0;
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-	unsigned long flags;
-
-	tsk->state = TASK_INTERRUPTIBLE;
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-	sem->sleepers++;
-	for (;;) {
-		int sleepers = sem->sleepers;
-
-		/*
-		 * With signals pending, this turns into
-		 * the trylock failure case - we won't be
-		 * sleeping, and we can't get the lock as
-		 * it has contention. Just correct the count
-		 * and exit.
-		 */
-		if (signal_pending(current)) {
-			retval = -EINTR;
-			sem->sleepers = 0;
-			atomic_add(sleepers, &sem->count);
-			break;
-		}
-
-		/*
-		 * Add "everybody else" into it. They aren't
-		 * playing, because we own the spinlock in
-		 * wait_queue_head. The "-1" is because we're
-		 * still hoping to get the semaphore.
-		 */
-		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
-			sem->sleepers = 0;
-			break;
-		}
-		sem->sleepers = 1;	/* us - see -1 above */
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-		schedule();
-
-		spin_lock_irqsave(&sem->wait.lock, flags);
-		tsk->state = TASK_INTERRUPTIBLE;
-	}
-	remove_wait_queue_locked(&sem->wait, &wait);
-	wake_up_locked(&sem->wait);
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-	tsk->state = TASK_RUNNING;
-	return retval;
-}
-
-/*
- * Trylock failed - make sure we correct for
- * having decremented the count.
- *
- * We could have done the trylock with a
- * single "cmpxchg" without failure cases,
- * but then it wouldn't work on a 386.
- */
-static fastcall int __attribute_used__ __down_trylock(struct semaphore * sem)
-{
-	int sleepers;
-	unsigned long flags;
-
-	spin_lock_irqsave(&sem->wait.lock, flags);
-	sleepers = sem->sleepers + 1;
-	sem->sleepers = 0;
-
-	/*
-	 * Add "everybody else" and us into it. They aren't
-	 * playing, because we own the spinlock in the
-	 * wait_queue_head.
-	 */
-	if (!atomic_add_negative(sleepers, &sem->count)) {
-		wake_up_locked(&sem->wait);
-	}
-
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-	return 1;
-}
-
-
-/*
  * The semaphore operations have a special calling sequence that
  * allow us to do a simpler in-line version of them. These routines
  * need to convert that sequence back into the C sequence when
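
As the comments being deleted above point out, these functions are only the contended slow path; the uncontended fast path stays inline in <asm/semaphore.h>. Below is a minimal C approximation of that relationship, for illustration only: the real i386 down() is hand-written inline asm that does a locked decrement and branches to the __down_failed stub, which then calls __down(); down_sketch is a made-up name.

/* Illustrative sketch, not the kernel's actual inline asm fast path. */
static inline void down_sketch(struct semaphore *sem)
{
	/* Fast path: a single atomic decrement when there is no contention. */
	if (atomic_add_negative(-1, &sem->count))
		/* Count went negative: the semaphore is held, so fall into
		 * the C slow path this patch moves to lib/semaphore-sleepers.c. */
		__down(sem);
}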